/* drivers/net/tg3.c — from linux-2.6-omap-h63xx.git, commit "[TG3]: Increase the PCI MRRS" */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.85"
#define DRV_MODULE_RELDATE      "October 18, 2007"

/* Default MAC/RX/TX mode register values (0 == use hardware defaults). */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap used when the tg3_debug module param is -1. */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts above. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Ring size is a power of two, so wraparound is a cheap mask. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame length plus tp->rx_offset and 64 bytes slack. */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6
134
/* Identification string printed at probe time. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Debug message bitmap; settable at module load via the tg3_debug param. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI IDs of every supported Tigon3 variant; the empty entry terminates
 * the table.  Exported to userspace/modutils via MODULE_DEVICE_TABLE.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* Names reported for ETHTOOL_GSTRINGS/ETHTOOL_GSTATS.  TG3_NUM_STATS is
 * derived from sizeof(struct tg3_ethtool_stats); presumably the entry
 * order mirrors that struct's layout — verify against tg3.h if editing.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
298
/* Labels for the TG3_NUM_TEST ethtool self-test results, in result order. */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
309
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311 {
312         writel(val, tp->regs + off);
313 }
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->aperegs + off);
323 }
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a chip register through the PCI config-space window
 * (REG_BASE_ADDR selects the register, REG_DATA carries the value).
 * indirect_lock serializes use of the shared window so the two config
 * writes are never interleaved with another user's.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
340 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341 {
342         writel(val, tp->regs + off);
343         readl(tp->regs + off);
344 }
345
/* Read a chip register through the PCI config-space window; counterpart
 * of tg3_write_indirect_reg32().  indirect_lock keeps the window-select
 * write and the data read atomic with respect to other users.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
357
/* Indirect (config-space) mailbox write.  Two mailboxes have dedicated
 * config-space aliases and bypass the shared register window entirely;
 * the rest go through the window at a 0x5600 offset.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* RX return ring consumer index has its own config-space alias. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* As does the standard RX ring producer index. */
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
387
/* Indirect (config-space) mailbox read; mailboxes sit at a 0x5600
 * offset within the shared register window, guarded by indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
399
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        /* Chips flagged with PCIX_TARGET_HWBUG or ICH_WORKAROUND go
         * through the per-chip write op instead of a posted MMIO write.
         */
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
424
/* Mailbox write followed by a read-back flush; the read-back is skipped
 * when the MBOX_WRITE_REORDER or ICH workaround flags are set.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
432
/* TX mailbox write via MMIO.  TXD_MBOX_HWBUG chips need the value
 * written twice; MBOX_WRITE_REORDER chipsets get a read-back to flush
 * the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 {
450         writel(val, tp->regs + off + GRCMBOX_BASE);
451 }
452
/* Register/mailbox access shorthands.  All dispatch through the per-chip
 * method pointers chosen at probe time; the _f variants flush the posted
 * write, and tw32_wait_f additionally waits 'us' microseconds (see
 * _tw32_flush above).  They all expect a local 'tp' in scope.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
463
/* Write a 32-bit word into NIC SRAM at 'off' through the memory window.
 * On 5906, writes into the stats-block range are silently skipped
 * (chip quirk).  Uses config-space accesses when SRAM_USE_CONFIG is
 * set, flushed MMIO otherwise; the window base is always restored to 0.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read a 32-bit word from NIC SRAM at 'off' through the memory window.
 * On 5906, reads from the stats-block range return 0 (chip quirk).
 * Mirrors tg3_write_mem(): config-space or flushed-MMIO accesses under
 * indirect_lock, window base restored to 0 afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Mask interrupts: set the PCI-interrupt mask bit in MISC_HOST_CTRL,
 * then write 1 to the interrupt mailbox (flushed) to keep the chip's
 * interrupt state off.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* If a (non-tagged-status) status-block update is pending, force an
 * interrupt via GRC local ctrl; otherwise kick the host coalescing
 * engine to re-evaluate immediately.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts after an irq-sync section. */
static void tg3_enable_ints(struct tg3 *tp)
{
        /* Clear irq_sync first; wmb() orders the store before the MMIO
         * writes below.
         */
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        /* 1-shot MSI parts get the mailbox written a second time. */
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        /* Fire/schedule an interrupt now if work was already pending. */
        tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        /* Order the mailbox write before any subsequent MMIO. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the interface: refresh trans_start so the TX watchdog does
 * not fire while we are stopped, then stop NAPI polling before
 * disabling the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
662
/* Restart the interface after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated and unmask interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        /* Setting SD_STATUS_UPDATED makes tg3_cond_int() (via
         * tg3_enable_ints) force an interrupt in non-tagged mode.
         */
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
674
/* Reprogram TG3PCI_CLOCK_CTRL, stepping through ALTCLK as required by
 * the chip before settling on the final value.  Skipped entirely on
 * CPMU-equipped and 5780-class devices.  Each step uses a flushed
 * write plus a 40 usec wait (see _tw32_flush).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        /* Preserve only the CLKRUN bits and the low 5 bits. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Drop the 44MHz core clock in two steps via ALTCLK. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
/* Upper bound on MI_COM busy-wait iterations (10 usec apiece). */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register 'reg' over the MII management interface.
 * Temporarily turns off MAC autopolling if it is active, busy-waits for
 * the MI_COM transaction to complete, then restores the prior MI mode.
 * Returns 0 with the value in *val on success, -EBUSY on timeout
 * (*val left as 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        /* Autopoll would race with our manual MI_COM access. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI_COM frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears, then re-read for the data. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore autopolling if we disabled it above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
757
/* Write 'val' to PHY register 'reg' over the MII management interface.
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are skipped and
 * reported as success.  Otherwise mirrors tg3_readphy(): pause
 * autopolling, issue the MI_COM write, busy-wait, restore MI mode.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        /* Autopoll would race with our manual MI_COM access. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI_COM frame: PHY address, register, data, write cmd. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore autopolling if we disabled it above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
806
/* Enable (enable != 0) or disable automatic MDI/MDI-X crossover.
 * 5906 uses an EPHY shadow register; other 5705+ copper PHYs use the
 * AUX_CTRL misc shadow.  No-op on pre-5705 chips and SerDes devices.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                /* Open the EPHY shadow, flip the MDIX bit, then restore
                 * the original EPHY_TEST value to close the shadow.
                 */
                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                /* Read-modify-write the AUX_CTRL misc shadow; WREN must
                 * be set for the write-back to take effect.
                 */
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
844
845 static void tg3_phy_set_wirespeed(struct tg3 *tp)
846 {
847         u32 val;
848
849         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850                 return;
851
852         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855                              (val | (1 << 15) | (1 << 4)));
856 }
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Write a fixed test pattern into each of the PHY DSP's four channels
 * and read it back to verify the DSP (part of the 5703/4/5 PHY reset
 * workaround).
 *
 * On a macro timeout, *resetp is set to 1 so the caller retries after
 * a fresh BMCR reset.  Returns 0 if all four channels verify, -EBUSY
 * otherwise.  Register addresses and values are Broadcom magic with
 * no public documentation.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Six words per channel; readback masks below show the pairs are
	 * a 15-bit low word followed by a 4-bit high word.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's pattern area in the DSP. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		/* Load the six pattern words. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the write macro and wait for it to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel and start the readback macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the pattern as low/high word pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue what appears to be a
				 * DSP recovery sequence (undocumented)
				 * and fail WITHOUT requesting another
				 * BMCR reset -- *resetp is untouched.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
972
973 static int tg3_phy_reset_chanpat(struct tg3 *tp)
974 {
975         int chan;
976
977         for (chan = 0; chan < 4; chan++) {
978                 int i;
979
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981                              (chan * 0x2000) | 0x0200);
982                 tg3_writephy(tp, 0x16, 0x0002);
983                 for (i = 0; i < 6; i++)
984                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985                 tg3_writephy(tp, 0x16, 0x0202);
986                 if (tg3_wait_macro_done(tp))
987                         return -EBUSY;
988         }
989
990         return 0;
991 }
992
/* PHY reset workaround for 5703/5704/5705: reset the PHY, then verify
 * its DSP with a test pattern, retrying with a fresh BMCR reset (up to
 * 10 attempts) whenever the pattern check requests one.
 *
 * Returns 0 on success or a negative errno-style code.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode, saving the original register 9
		 * contents for restoration below.
		 * NOTE(review): if this read fails on every attempt,
		 * phy9_orig is used uninitialized after the loop (and
		 * reg32 likewise) -- confirm whether that path can
		 * occur in practice before relying on the restore.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test patterns whether or not the verify passed. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access (undoes the 0x8005/0x0800 write). */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the master-mode register saved above. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1068
1069 static void tg3_link_report(struct tg3 *);
1070
/* Reset the tigon3 PHY and apply the chip- and PHY-specific post-reset
 * workarounds (DSP fixups, jumbo-frame bits, automdix, wirespeed).
 * (The old comment here referred to a FORCE argument that this
 * function does not take.)
 *
 * Returns 0 on success or a negative errno-style code.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Bring the 5906 internal ephy out of IDDQ power-down
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR latches link-down events; read twice so phy_status holds
	 * the current state and to confirm the MDIO path works.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; report it down immediately. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset dance. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
		u32 val;

		/* Undo the 12.5MHz MAC clock that power-down forces
		 * (see tg3_power_down_phy).
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

out:
	/* Erratum fixups below use undocumented Broadcom DSP
	 * register/value pairs; the 0x0c00/0x0400 AUX_CTRL writes
	 * bracket each sequence.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally, per the original code. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1196
/* Drive the GRC local-control GPIOs that switch the board's auxiliary
 * (Vaux) power.  On dual-port parts (5704/5714) both functions share
 * these GPIOs, so the peer device's WOL/ASF needs are consulted and
 * only one function performs the switch.  The exact GPIO write order
 * below is deliberate; do not reorder.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* LOM designs have no GPIO-controlled aux power. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* If either function wants WOL or runs ASF, aux power must stay
	 * up; program the GPIOs for the powered configuration.
	 */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer do it if it is already set up. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* Three-step sequence: raise OUTPUT0, then drop
			 * OUTPUT2 (where usable), 100us apart.
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* No one needs aux power: pulse GPIO1 to switch it off
		 * (not applicable to 5700/5701).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1292
1293 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1294 {
1295         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1296                 return 1;
1297         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1298                 if (speed != SPEED_10)
1299                         return 1;
1300         } else if (speed == SPEED_10)
1301                 return 1;
1302
1303         return 0;
1304 }
1305
1306 static int tg3_setup_phy(struct tg3 *, int);
1307
1308 #define RESET_KIND_SHUTDOWN     0
1309 #define RESET_KIND_INIT         1
1310 #define RESET_KIND_SUSPEND      2
1311
1312 static void tg3_write_sig_post_reset(struct tg3 *, int);
1313 static int tg3_halt_cpu(struct tg3 *, u32);
1314 static int tg3_nvram_lock(struct tg3 *);
1315 static void tg3_nvram_unlock(struct tg3 *);
1316
/* Put the PHY (or serdes) into its lowest-power state, skipping chips
 * whose errata forbid powering the PHY down.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* 5704 serdes: hand autoneg to hardware and hold
			 * it in soft reset; bit 15 of SERDES_CFG is
			 * undocumented here -- presumably a power bit.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the ephy, then drop it into IDDQ
		 * (deep power-down); no BMCR_PDOWN for this part.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the link LED off before powering down. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
		/* Force the 1000MB MAC clock down to 12.5MHz while the
		 * PHY is off (reversed in tg3_phy_reset).
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1365
/* Transition the device into the requested PCI power state, setting up
 * wake-on-LAN, clock gating and PHY power along the way.
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 * D0 is handled as an early return; the remainder of the function is
 * the low-power (D1/D2/D3hot) path.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of the PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear any pending PME status and the power-state field. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is in low power. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link configuration so resume can restore it. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper PHYs drop to autoneg 10/half for minimum power draw. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Without ASF, wait up to ~200ms for the firmware mailbox
		 * magic before signalling WOL state.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* WOL: keep the MAC alive in the right port mode so magic
	 * packets can be received while suspended.
	 */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			/* Undocumented AUX_CTRL value -- presumably a
			 * low-power PHY setting; confirm against the
			 * Broadcom programmer's guide.
			 */
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate as many clocks as the chip family permits. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-stage write, 40us apart; order is deliberate. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when nothing (WOL/ASF/APE)
	 * still needs it.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually obtained.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1590
1591 static void tg3_link_report(struct tg3 *tp)
1592 {
1593         if (!netif_carrier_ok(tp->dev)) {
1594                 if (netif_msg_link(tp))
1595                         printk(KERN_INFO PFX "%s: Link is down.\n",
1596                                tp->dev->name);
1597         } else if (netif_msg_link(tp)) {
1598                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1599                        tp->dev->name,
1600                        (tp->link_config.active_speed == SPEED_1000 ?
1601                         1000 :
1602                         (tp->link_config.active_speed == SPEED_100 ?
1603                          100 : 10)),
1604                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1605                         "full" : "half"));
1606
1607                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1608                        "%s for RX.\n",
1609                        tp->dev->name,
1610                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1611                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1612         }
1613 }
1614
1615 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1616 {
1617         u32 new_tg3_flags = 0;
1618         u32 old_rx_mode = tp->rx_mode;
1619         u32 old_tx_mode = tp->tx_mode;
1620
1621         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1622
1623                 /* Convert 1000BaseX flow control bits to 1000BaseT
1624                  * bits before resolving flow control.
1625                  */
1626                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1627                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1628                                        ADVERTISE_PAUSE_ASYM);
1629                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1630
1631                         if (local_adv & ADVERTISE_1000XPAUSE)
1632                                 local_adv |= ADVERTISE_PAUSE_CAP;
1633                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1634                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1635                         if (remote_adv & LPA_1000XPAUSE)
1636                                 remote_adv |= LPA_PAUSE_CAP;
1637                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1638                                 remote_adv |= LPA_PAUSE_ASYM;
1639                 }
1640
1641                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1642                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1643                                 if (remote_adv & LPA_PAUSE_CAP)
1644                                         new_tg3_flags |=
1645                                                 (TG3_FLAG_RX_PAUSE |
1646                                                 TG3_FLAG_TX_PAUSE);
1647                                 else if (remote_adv & LPA_PAUSE_ASYM)
1648                                         new_tg3_flags |=
1649                                                 (TG3_FLAG_RX_PAUSE);
1650                         } else {
1651                                 if (remote_adv & LPA_PAUSE_CAP)
1652                                         new_tg3_flags |=
1653                                                 (TG3_FLAG_RX_PAUSE |
1654                                                 TG3_FLAG_TX_PAUSE);
1655                         }
1656                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1657                         if ((remote_adv & LPA_PAUSE_CAP) &&
1658                         (remote_adv & LPA_PAUSE_ASYM))
1659                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1660                 }
1661
1662                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1663                 tp->tg3_flags |= new_tg3_flags;
1664         } else {
1665                 new_tg3_flags = tp->tg3_flags;
1666         }
1667
1668         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1669                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1670         else
1671                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1672
1673         if (old_rx_mode != tp->rx_mode) {
1674                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1675         }
1676
1677         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1678                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1679         else
1680                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1681
1682         if (old_tx_mode != tp->tx_mode) {
1683                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1684         }
1685 }
1686
1687 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1688 {
1689         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1690         case MII_TG3_AUX_STAT_10HALF:
1691                 *speed = SPEED_10;
1692                 *duplex = DUPLEX_HALF;
1693                 break;
1694
1695         case MII_TG3_AUX_STAT_10FULL:
1696                 *speed = SPEED_10;
1697                 *duplex = DUPLEX_FULL;
1698                 break;
1699
1700         case MII_TG3_AUX_STAT_100HALF:
1701                 *speed = SPEED_100;
1702                 *duplex = DUPLEX_HALF;
1703                 break;
1704
1705         case MII_TG3_AUX_STAT_100FULL:
1706                 *speed = SPEED_100;
1707                 *duplex = DUPLEX_FULL;
1708                 break;
1709
1710         case MII_TG3_AUX_STAT_1000HALF:
1711                 *speed = SPEED_1000;
1712                 *duplex = DUPLEX_HALF;
1713                 break;
1714
1715         case MII_TG3_AUX_STAT_1000FULL:
1716                 *speed = SPEED_1000;
1717                 *duplex = DUPLEX_FULL;
1718                 break;
1719
1720         default:
1721                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1722                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1723                                  SPEED_10;
1724                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1725                                   DUPLEX_HALF;
1726                         break;
1727                 }
1728                 *speed = SPEED_INVALID;
1729                 *duplex = DUPLEX_INVALID;
1730                 break;
1731         };
1732 }
1733
/* Program the copper PHY's advertisement/control registers from
 * tp->link_config, then either force the requested speed/duplex
 * (autoneg disabled) or (re)start autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised when the WOL-at-100Mb flag is
		 * set (presumably so the NIC can keep link for wake
		 * events -- behavior follows the flag name).
		 */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything in
		 * link_config.advertising, minus gigabit modes on
		 * 10/100-only devices.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 revisions are forced to be the
			 * gigabit master (chip-revision workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* Advertise only the single requested gigabit
			 * duplex mode.
			 */
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only rewrite BMCR if the forced mode differs from the
		 * current one.  Put the PHY in loopback first so the
		 * link drops, wait up to 15ms for link-down, then write
		 * the new mode.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched; read it
				 * twice to get the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1866
1867 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1868 {
1869         int err;
1870
1871         /* Turn off tap power management. */
1872         /* Set Extended packet length bit */
1873         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1874
1875         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1876         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1877
1878         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1879         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1880
1881         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1882         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1883
1884         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1885         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1886
1887         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1888         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1889
1890         udelay(40);
1891
1892         return err;
1893 }
1894
1895 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1896 {
1897         u32 adv_reg, all_mask = 0;
1898
1899         if (mask & ADVERTISED_10baseT_Half)
1900                 all_mask |= ADVERTISE_10HALF;
1901         if (mask & ADVERTISED_10baseT_Full)
1902                 all_mask |= ADVERTISE_10FULL;
1903         if (mask & ADVERTISED_100baseT_Half)
1904                 all_mask |= ADVERTISE_100HALF;
1905         if (mask & ADVERTISED_100baseT_Full)
1906                 all_mask |= ADVERTISE_100FULL;
1907
1908         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1909                 return 0;
1910
1911         if ((adv_reg & all_mask) != all_mask)
1912                 return 0;
1913         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1914                 u32 tg3_ctrl;
1915
1916                 all_mask = 0;
1917                 if (mask & ADVERTISED_1000baseT_Half)
1918                         all_mask |= ADVERTISE_1000HALF;
1919                 if (mask & ADVERTISED_1000baseT_Full)
1920                         all_mask |= ADVERTISE_1000FULL;
1921
1922                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1923                         return 0;
1924
1925                 if ((tg3_ctrl & all_mask) != all_mask)
1926                         return 0;
1927         }
1928         return 1;
1929 }
1930
/* Bring up (or re-evaluate) the link on a copper PHY.  Clears latched
 * MAC status, applies per-chip PHY workarounds, reads the negotiated
 * speed/duplex, resolves flow control, programs MAC_MODE, and reports
 * carrier transitions.  @force_reset forces a PHY reset first.
 * Always returns 0 except on 5401 DSP init failure.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge all latched link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice for the
		 * current value.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		/* 5401 with no link: reload the DSP workaround and
		 * give the link up to 10ms to come back.
		 */
		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 that still has no link at gigabit:
			 * reset the PHY and redo the DSP init.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of shadow register 0x4007 is set; if it
		 * was not, set it and go straight to re-begin the PHY
		 * setup (skip the link poll this pass).
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll up to ~4ms for link-up (double read: latched BMSR). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait up to 20ms for a non-zero aux status. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR reads until a plausible value appears;
		 * 0x0000 / 0x7fff presumably indicate a failed or
		 * garbage MDIO read -- TODO confirm.
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link only counts if the PHY is
			 * not autonegotiating and matches the request.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	/* No link (or exiting low power): reprogram the PHY and
	 * re-check link status once.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Select the MAC port mode: MII for 10/100, GMII otherwise. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: re-ack status changes
	 * and notify the firmware via its mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate carrier changes to the netdev layer and log them. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2207
/* Software state for the fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine), which resolves the link configuration
 * from the raw TX/RX config words exchanged on the fiber link.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the state machine */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	/* Control/result flags: MR_AN_* drive the machine; MR_LP_ADV_*
	 * record what the link partner advertised.
	 */
	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters: cur_time is incremented once per state-machine
	 * invocation; link_time records when a state was entered, for
	 * comparison against ANEG_STATE_SETTLE_TIME.
	 */
	unsigned long link_time, cur_time;

	/* Last RX config word seen, and how many consecutive times it
	 * repeated (used to debounce ability_match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Debounced match indicators derived from the RX config word. */
	char ability_match, idle_match, ack_match;

	/* Raw TX/RX autoneg config words; bit layout below. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Result codes for the fiber autoneg state machine. */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must settle before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
2271
2272 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2273                                    struct tg3_fiber_aneginfo *ap)
2274 {
2275         unsigned long delta;
2276         u32 rx_cfg_reg;
2277         int ret;
2278
2279         if (ap->state == ANEG_STATE_UNKNOWN) {
2280                 ap->rxconfig = 0;
2281                 ap->link_time = 0;
2282                 ap->cur_time = 0;
2283                 ap->ability_match_cfg = 0;
2284                 ap->ability_match_count = 0;
2285                 ap->ability_match = 0;
2286                 ap->idle_match = 0;
2287                 ap->ack_match = 0;
2288         }
2289         ap->cur_time++;
2290
2291         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2292                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2293
2294                 if (rx_cfg_reg != ap->ability_match_cfg) {
2295                         ap->ability_match_cfg = rx_cfg_reg;
2296                         ap->ability_match = 0;
2297                         ap->ability_match_count = 0;
2298                 } else {
2299                         if (++ap->ability_match_count > 1) {
2300                                 ap->ability_match = 1;
2301                                 ap->ability_match_cfg = rx_cfg_reg;
2302                         }
2303                 }
2304                 if (rx_cfg_reg & ANEG_CFG_ACK)
2305                         ap->ack_match = 1;
2306                 else
2307                         ap->ack_match = 0;
2308
2309                 ap->idle_match = 0;
2310         } else {
2311                 ap->idle_match = 1;
2312                 ap->ability_match_cfg = 0;
2313                 ap->ability_match_count = 0;
2314                 ap->ability_match = 0;
2315                 ap->ack_match = 0;
2316
2317                 rx_cfg_reg = 0;
2318         }
2319
2320         ap->rxconfig = rx_cfg_reg;
2321         ret = ANEG_OK;
2322
2323         switch(ap->state) {
2324         case ANEG_STATE_UNKNOWN:
2325                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2326                         ap->state = ANEG_STATE_AN_ENABLE;
2327
2328                 /* fallthru */
2329         case ANEG_STATE_AN_ENABLE:
2330                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2331                 if (ap->flags & MR_AN_ENABLE) {
2332                         ap->link_time = 0;
2333                         ap->cur_time = 0;
2334                         ap->ability_match_cfg = 0;
2335                         ap->ability_match_count = 0;
2336                         ap->ability_match = 0;
2337                         ap->idle_match = 0;
2338                         ap->ack_match = 0;
2339
2340                         ap->state = ANEG_STATE_RESTART_INIT;
2341                 } else {
2342                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2343                 }
2344                 break;
2345
2346         case ANEG_STATE_RESTART_INIT:
2347                 ap->link_time = ap->cur_time;
2348                 ap->flags &= ~(MR_NP_LOADED);
2349                 ap->txconfig = 0;
2350                 tw32(MAC_TX_AUTO_NEG, 0);
2351                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2352                 tw32_f(MAC_MODE, tp->mac_mode);
2353                 udelay(40);
2354
2355                 ret = ANEG_TIMER_ENAB;
2356                 ap->state = ANEG_STATE_RESTART;
2357
2358                 /* fallthru */
2359         case ANEG_STATE_RESTART:
2360                 delta = ap->cur_time - ap->link_time;
2361                 if (delta > ANEG_STATE_SETTLE_TIME) {
2362                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2363                 } else {
2364                         ret = ANEG_TIMER_ENAB;
2365                 }
2366                 break;
2367
2368         case ANEG_STATE_DISABLE_LINK_OK:
2369                 ret = ANEG_DONE;
2370                 break;
2371
2372         case ANEG_STATE_ABILITY_DETECT_INIT:
2373                 ap->flags &= ~(MR_TOGGLE_TX);
2374                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2375                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2376                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2377                 tw32_f(MAC_MODE, tp->mac_mode);
2378                 udelay(40);
2379
2380                 ap->state = ANEG_STATE_ABILITY_DETECT;
2381                 break;
2382
2383         case ANEG_STATE_ABILITY_DETECT:
2384                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2385                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2386                 }
2387                 break;
2388
2389         case ANEG_STATE_ACK_DETECT_INIT:
2390                 ap->txconfig |= ANEG_CFG_ACK;
2391                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2392                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2393                 tw32_f(MAC_MODE, tp->mac_mode);
2394                 udelay(40);
2395
2396                 ap->state = ANEG_STATE_ACK_DETECT;
2397
2398                 /* fallthru */
2399         case ANEG_STATE_ACK_DETECT:
2400                 if (ap->ack_match != 0) {
2401                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2402                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2403                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2404                         } else {
2405                                 ap->state = ANEG_STATE_AN_ENABLE;
2406                         }
2407                 } else if (ap->ability_match != 0 &&
2408                            ap->rxconfig == 0) {
2409                         ap->state = ANEG_STATE_AN_ENABLE;
2410                 }
2411                 break;
2412
2413         case ANEG_STATE_COMPLETE_ACK_INIT:
2414                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2415                         ret = ANEG_FAILED;
2416                         break;
2417                 }
2418                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2419                                MR_LP_ADV_HALF_DUPLEX |
2420                                MR_LP_ADV_SYM_PAUSE |
2421                                MR_LP_ADV_ASYM_PAUSE |
2422                                MR_LP_ADV_REMOTE_FAULT1 |
2423                                MR_LP_ADV_REMOTE_FAULT2 |
2424                                MR_LP_ADV_NEXT_PAGE |
2425                                MR_TOGGLE_RX |
2426                                MR_NP_RX);
2427                 if (ap->rxconfig & ANEG_CFG_FD)
2428                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2429                 if (ap->rxconfig & ANEG_CFG_HD)
2430                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2431                 if (ap->rxconfig & ANEG_CFG_PS1)
2432                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2433                 if (ap->rxconfig & ANEG_CFG_PS2)
2434                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2435                 if (ap->rxconfig & ANEG_CFG_RF1)
2436                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2437                 if (ap->rxconfig & ANEG_CFG_RF2)
2438                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2439                 if (ap->rxconfig & ANEG_CFG_NP)
2440                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2441
2442                 ap->link_time = ap->cur_time;
2443
2444                 ap->flags ^= (MR_TOGGLE_TX);
2445                 if (ap->rxconfig & 0x0008)
2446                         ap->flags |= MR_TOGGLE_RX;
2447                 if (ap->rxconfig & ANEG_CFG_NP)
2448                         ap->flags |= MR_NP_RX;
2449                 ap->flags |= MR_PAGE_RX;
2450
2451                 ap->state = ANEG_STATE_COMPLETE_ACK;
2452                 ret = ANEG_TIMER_ENAB;
2453                 break;
2454
2455         case ANEG_STATE_COMPLETE_ACK:
2456                 if (ap->ability_match != 0 &&
2457                     ap->rxconfig == 0) {
2458                         ap->state = ANEG_STATE_AN_ENABLE;
2459                         break;
2460                 }
2461                 delta = ap->cur_time - ap->link_time;
2462                 if (delta > ANEG_STATE_SETTLE_TIME) {
2463                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2464                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2465                         } else {
2466                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2467                                     !(ap->flags & MR_NP_RX)) {
2468                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2469                                 } else {
2470                                         ret = ANEG_FAILED;
2471                                 }
2472                         }
2473                 }
2474                 break;
2475
2476         case ANEG_STATE_IDLE_DETECT_INIT:
2477                 ap->link_time = ap->cur_time;
2478                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2479                 tw32_f(MAC_MODE, tp->mac_mode);
2480                 udelay(40);
2481
2482                 ap->state = ANEG_STATE_IDLE_DETECT;
2483                 ret = ANEG_TIMER_ENAB;
2484                 break;
2485
2486         case ANEG_STATE_IDLE_DETECT:
2487                 if (ap->ability_match != 0 &&
2488                     ap->rxconfig == 0) {
2489                         ap->state = ANEG_STATE_AN_ENABLE;
2490                         break;
2491                 }
2492                 delta = ap->cur_time - ap->link_time;
2493                 if (delta > ANEG_STATE_SETTLE_TIME) {
2494                         /* XXX another gem from the Broadcom driver :( */
2495                         ap->state = ANEG_STATE_LINK_OK;
2496                 }
2497                 break;
2498
2499         case ANEG_STATE_LINK_OK:
2500                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2501                 ret = ANEG_DONE;
2502                 break;
2503
2504         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2505                 /* ??? unimplemented */
2506                 break;
2507
2508         case ANEG_STATE_NEXT_PAGE_WAIT:
2509                 /* ??? unimplemented */
2510                 break;
2511
2512         default:
2513                 ret = ANEG_FAILED;
2514                 break;
2515         };
2516
2517         return ret;
2518 }
2519
2520 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2521 {
2522         int res = 0;
2523         struct tg3_fiber_aneginfo aninfo;
2524         int status = ANEG_FAILED;
2525         unsigned int tick;
2526         u32 tmp;
2527
2528         tw32_f(MAC_TX_AUTO_NEG, 0);
2529
2530         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2531         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2532         udelay(40);
2533
2534         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2535         udelay(40);
2536
2537         memset(&aninfo, 0, sizeof(aninfo));
2538         aninfo.flags |= MR_AN_ENABLE;
2539         aninfo.state = ANEG_STATE_UNKNOWN;
2540         aninfo.cur_time = 0;
2541         tick = 0;
2542         while (++tick < 195000) {
2543                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2544                 if (status == ANEG_DONE || status == ANEG_FAILED)
2545                         break;
2546
2547                 udelay(1);
2548         }
2549
2550         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2551         tw32_f(MAC_MODE, tp->mac_mode);
2552         udelay(40);
2553
2554         *flags = aninfo.flags;
2555
2556         if (status == ANEG_DONE &&
2557             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2558                              MR_LP_ADV_FULL_DUPLEX)))
2559                 res = 1;
2560
2561         return res;
2562 }
2563
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 * The numbered PHY registers written here are vendor-specific; the
 * inline comments below (from the original author) are the only
 * documentation of their meaning.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link.
         * (I.e. skip the reset sequence only when the device is already
         * initialized and the PCS has lost sync.)
         */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2613
/* Link setup for fiber devices whose autonegotiation is performed by
 * the on-chip SG_DIG hardware (rather than the software state machine).
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 if the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* All revs except 5704 A0/A1 need MAC_SERDES_CFG workaround
         * writes around SG_DIG_CTRL changes.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: if HW autoneg (bit 31 of SG_DIG_CTRL,
                 * presumably the autoneg-enable bit - see the 0x8...
                 * value used below) is on, turn it off.
                 */
                if (sg_dig_ctrl & (1 << 31)) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Magic per-port serdes values (0xc01/0x401
                                 * in bits 24-27); meaning undocumented here.
                                 */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }
                        tw32_f(SG_DIG_CTRL, 0x01388400);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = 0x81388400;

        /* Pause capability */
        expected_sg_dig_ctrl |= (1 << 11);

        /* Asymettric pause */
        expected_sg_dig_ctrl |= (1 << 12);

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* In parallel-detect mode with PCS sync but no config
                 * words arriving, keep the link up while the serdes
                 * countdown runs instead of restarting autoneg.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse bit 30 (presumably autoneg restart) then write
                 * the desired control value.
                 */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                /* Bit 1: autoneg complete; bits 19/20: partner pause
                 * ability (names not defined in this file - inferred
                 * from usage; verify against the SG_DIG register spec).
                 */
                if ((sg_dig_status & (1 << 1)) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        u32 local_adv, remote_adv;

                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (sg_dig_status & (1 << 19))
                                remote_adv |= LPA_PAUSE_CAP;
                        if (sg_dig_status & (1 << 20))
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                } else if (!(sg_dig_status & (1 << 1))) {
                        /* Autoneg not complete: let the countdown expire
                         * before trying parallel detection.
                         */
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                /* Disable HW autoneg and re-sample. */
                                tw32_f(SG_DIG_CTRL, 0x01388400);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->tg3_flags2 |=
                                                TG3_FLG2_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: restart the autoneg countdown. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
2750
/* Link setup for fiber devices using the software autoneg state
 * machine (fiber_autoneg()) instead of the SG_DIG hardware.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 if the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* No PCS sync means no link, regardless of autoneg mode. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 flags;
                int i;

                if (fiber_autoneg(tp, &flags)) {
                        u32 local_adv, remote_adv;

                        /* Translate the MR_* result flags into MII-style
                         * pause advertisements for flow-control setup.
                         */
                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (flags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_PAUSE_CAP;
                        if (flags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                /* Ack the latched sync/config-change bits until they
                 * stay clear (bounded at 30 tries, ~60 us apart).
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Autoneg failed but PCS is synced and the partner is
                 * not sending config words: treat as link up anyway
                 * (parallel-detect style fallback).
                 */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                /* Forcing 1000FD link up. */
                current_link_up = 1;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
2807
/* Top-level link setup for TBI/fiber devices.  Chooses between the
 * hardware (SG_DIG) and software autoneg paths, then propagates the
 * result to the netdev carrier state, LEDs, and link_config fields.
 *
 * @tp:          device state
 * @force_reset: unused in this function (kept for signature parity with
 *               the other tg3_setup_*_phy() variants)
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Remember pre-call state so we can report only real changes. */
        orig_pause_cfg =
                (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
                                  TG3_FLAG_TX_PAUSE));
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: software-autoneg device already up and initialized
         * with a clean status (synced, signal, no latched changes) -
         * just ack the change bits and leave the link alone.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Put the MAC into TBI (fiber) port mode. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the latched link-change bit in the status block. */
        tp->hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack latched MAC status change bits until they stay clear. */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                /* Autoneg countdown expired with no sync: pulse
                 * SEND_CONFIGS to nudge the partner.
                 */
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        /* Fiber links are always 1000 Mb/s full duplex when up. */
        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* Report a link transition, or a pause/speed/duplex change on
         * an unchanged carrier state.
         */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                u32 now_pause_cfg =
                        tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
                                         TG3_FLAG_TX_PAUSE);
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
2919
/* Link setup for serdes devices controlled through an MII interface
 * (e.g. 5714S-class parts): program the 1000BASE-X advertisement via
 * MII registers and resolve the resulting link state.
 *
 * @tp:          device state
 * @force_reset: non-zero to reset the PHY before configuring
 *
 * Returns 0 on success or an accumulated non-zero error from the
 * tg3_readphy() calls.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up, err = 0;
        u32 bmsr, bmcr;
        u16 current_speed;
        u8 current_duplex;

        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32(MAC_EVENT, 0);

        /* Ack all latched MAC status change bits. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if (force_reset)
                tg3_phy_reset(tp);

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        /* BMSR link status is latched-low; read twice for the current
         * value.  On 5714, trust the MAC's TX status for link instead.
         */
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        err |= tg3_readphy(tp, MII_BMCR, &bmcr);

        if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
            (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 adv, new_adv;

                /* Build the 1000BASE-X advertisement word, preserving
                 * any bits outside the fields we manage.
                 */
                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
                                  ADVERTISE_1000XPAUSE |
                                  ADVERTISE_1000XPSE_ASYM |
                                  ADVERTISE_SLCT);

                /* Always advertise symmetric PAUSE just like copper */
                new_adv |= ADVERTISE_1000XPAUSE;

                if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                        new_adv |= ADVERTISE_1000XHALF;
                if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                        new_adv |= ADVERTISE_1000XFULL;

                /* Advertisement changed (or autoneg was off): restart
                 * autoneg and return; link resolves on a later pass.
                 */
                if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);

                        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
                        tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

                        return err;
                }
        } else {
                /* Forced mode: disable autoneg, set requested duplex. */
                u32 new_bmcr;

                bmcr &= ~BMCR_SPEED1000;
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

                if (tp->link_config.duplex == DUPLEX_FULL)
                        new_bmcr |= BMCR_FULLDPLX;

                if (new_bmcr != bmcr) {
                        /* BMCR_SPEED1000 is a reserved bit that needs
                         * to be set on write.
                         */
                        new_bmcr |= BMCR_SPEED1000;

                        /* Force a linkdown */
                        if (netif_carrier_ok(tp->dev)) {
                                u32 adv;

                                /* Stop advertising anything so the
                                 * partner drops the link, then restart
                                 * autoneg briefly to make it take.
                                 */
                                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                                adv &= ~(ADVERTISE_1000XFULL |
                                         ADVERTISE_1000XHALF |
                                         ADVERTISE_SLCT);
                                tg3_writephy(tp, MII_ADVERTISE, adv);
                                tg3_writephy(tp, MII_BMCR, bmcr |
                                                           BMCR_ANRESTART |
                                                           BMCR_ANENABLE);
                                udelay(10);
                                netif_carrier_off(tp->dev);
                        }
                        tg3_writephy(tp, MII_BMCR, new_bmcr);
                        bmcr = new_bmcr;
                        /* Re-read latched-low BMSR after reconfiguring. */
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                        bmsr |= BMSR_LSTATUS;
                                else
                                        bmsr &= ~BMSR_LSTATUS;
                        }
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                }
        }

        if (bmsr & BMSR_LSTATUS) {
                current_speed = SPEED_1000;
                current_link_up = 1;
                if (bmcr & BMCR_FULLDPLX)
                        current_duplex = DUPLEX_FULL;
                else
                        current_duplex = DUPLEX_HALF;

                if (bmcr & BMCR_ANENABLE) {
                        u32 local_adv, remote_adv, common;

                        /* Resolve duplex/pause from the intersection of
                         * both sides' advertisements; no common X mode
                         * means autoneg did not really complete.
                         */
                        err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
                        err |= tg3_readphy(tp, MII_LPA, &remote_adv);
                        common = local_adv & remote_adv;
                        if (common & (ADVERTISE_1000XHALF |
                                      ADVERTISE_1000XFULL)) {
                                if (common & ADVERTISE_1000XFULL)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;

                                tg3_setup_flow_control(tp, local_adv,
                                                       remote_adv);
                        }
                        else
                                current_link_up = 0;
                }
        }

        /* NOTE(review): this tests the PREVIOUS active_duplex; the new
         * value (current_duplex) is only stored a few lines below, so
         * the MAC duplex bit lags one call behind - confirm intended.
         */
        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

        tp->link_config.active_speed = current_speed;
        tp->link_config.active_duplex = current_duplex;

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else {
                        netif_carrier_off(tp->dev);
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                }
                tg3_link_report(tp);
        }
        return err;
}
3086
/* Poll for a 1000BASE-X link partner that is not sending autoneg config
 * code words and force the link up by "parallel detection"; conversely,
 * re-enable autoneg once config code words are seen again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* NOTE(review): read twice on purpose -
			 * presumably the first read clears latched
			 * status bits; confirm against PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3144
/* (Re)configure the PHY according to its flavor (fiber SERDES, MII
 * SERDES or copper), then update MAC-side settings that depend on the
 * resulting link state: stats prescaler on 5784 A0, TX inter-packet
 * gap/slot time, statistics coalescing, and the ASPM L1 threshold.
 * Returns 0 or a negative errno from the flavor-specific setup.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
		u32 val, scale;

		/* Pick a GRC prescaler matching the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half uses a longer slot time (0xff) than all other modes (32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Only coalesce statistics updates while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Link down: restore the saved L1 entry threshold.
		 * Link up: saturate the threshold field.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3206
3207 /* This is called whenever we suspect that the system chipset is re-
3208  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3209  * is bogus tx completions. We try to recover by setting the
3210  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3211  * in the workqueue.
3212  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* We should only land here if the re-order workaround is not
	 * already active and the indirect mailbox method is not in use.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the condition; the reset task resets the chip later. */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3227
3228 static inline u32 tg3_tx_avail(struct tg3 *tp)
3229 {
3230         smp_mb();
3231         return (tp->tx_pending -
3232                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3233 }
3234
3235 /* Tigon3 never reports partial packet sends.  So we do not
3236  * need special logic to handle SKBs that have not had all
3237  * of their frags sent yet, like SunGEM does.
3238  */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Walk from our consumer index up to the hardware's, unmapping
	 * and freeing every completed skb along the way.
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at a "completed" slot means the completion
		 * index is bogus; trigger the re-order recovery path.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap the fragment descriptors that follow the head. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* An occupied frag slot, or running past hw_idx
			 * mid-skb, also indicates a bogus completion.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check availability under the tx lock before waking the
	 * queue, to close the race with a concurrent stop.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3302
3303 /* Returns size of skb allocated or < 0 on error.
3304  *
3305  * We only need to fill in the address because the other members
3306  * of the RX descriptor are invariant, see tg3_init_rings.
3307  *
3308  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3309  * posting buffers we only dirty the first cache line of the RX
3310  * descriptor (containing the address).  Whereas for the RX status
3311  * buffers the cpu only reads the last cacheline of the RX descriptor
3312  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3313  */
3314 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3315                             int src_idx, u32 dest_idx_unmasked)
3316 {
3317         struct tg3_rx_buffer_desc *desc;
3318         struct ring_info *map, *src_map;
3319         struct sk_buff *skb;
3320         dma_addr_t mapping;
3321         int skb_size, dest_idx;
3322
3323         src_map = NULL;
3324         switch (opaque_key) {
3325         case RXD_OPAQUE_RING_STD:
3326                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3327                 desc = &tp->rx_std[dest_idx];
3328                 map = &tp->rx_std_buffers[dest_idx];
3329                 if (src_idx >= 0)
3330                         src_map = &tp->rx_std_buffers[src_idx];
3331                 skb_size = tp->rx_pkt_buf_sz;
3332                 break;
3333
3334         case RXD_OPAQUE_RING_JUMBO:
3335                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3336                 desc = &tp->rx_jumbo[dest_idx];
3337                 map = &tp->rx_jumbo_buffers[dest_idx];
3338                 if (src_idx >= 0)
3339                         src_map = &tp->rx_jumbo_buffers[src_idx];
3340                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3341                 break;
3342
3343         default:
3344                 return -EINVAL;
3345         };
3346
3347         /* Do not overwrite any of the map or rp information
3348          * until we are sure we can commit to a new buffer.
3349          *
3350          * Callers depend upon this behavior and assume that
3351          * we leave everything unchanged if we fail.
3352          */
3353         skb = netdev_alloc_skb(tp->dev, skb_size);
3354         if (skb == NULL)
3355                 return -ENOMEM;
3356
3357         skb_reserve(skb, tp->rx_offset);
3358
3359         mapping = pci_map_single(tp->pdev, skb->data,
3360                                  skb_size - tp->rx_offset,
3361                                  PCI_DMA_FROMDEVICE);
3362
3363         map->skb = skb;
3364         pci_unmap_addr_set(map, mapping, mapping);
3365
3366         if (src_map != NULL)
3367                 src_map->skb = NULL;
3368
3369         desc->addr_hi = ((u64)mapping >> 32);
3370         desc->addr_lo = ((u64)mapping & 0xffffffff);
3371
3372         return skb_size;
3373 }
3374
3375 /* We only need to move over in the address because the other
3376  * members of the RX descriptor are invariant.  See notes above
3377  * tg3_alloc_rx_skb for full details.
3378  */
3379 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3380                            int src_idx, u32 dest_idx_unmasked)
3381 {
3382         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3383         struct ring_info *src_map, *dest_map;
3384         int dest_idx;
3385
3386         switch (opaque_key) {
3387         case RXD_OPAQUE_RING_STD:
3388                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3389                 dest_desc = &tp->rx_std[dest_idx];
3390                 dest_map = &tp->rx_std_buffers[dest_idx];
3391                 src_desc = &tp->rx_std[src_idx];
3392                 src_map = &tp->rx_std_buffers[src_idx];
3393                 break;
3394
3395         case RXD_OPAQUE_RING_JUMBO:
3396                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3397                 dest_desc = &tp->rx_jumbo[dest_idx];
3398                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3399                 src_desc = &tp->rx_jumbo[src_idx];
3400                 src_map = &tp->rx_jumbo_buffers[src_idx];
3401                 break;
3402
3403         default:
3404                 return;
3405         };
3406
3407         dest_map->skb = src_map->skb;
3408         pci_unmap_addr_set(dest_map, mapping,
3409                            pci_unmap_addr(src_map, mapping));
3410         dest_desc->addr_hi = src_desc->addr_hi;
3411         dest_desc->addr_lo = src_desc->addr_lo;
3412
3413         src_map->skb = NULL;
3414 }
3415
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame via the hardware-accelerated VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3422
3423 /* The RX ring scheme is composed of multiple rings which post fresh
3424  * buffers to the chip, and one special ring the chip uses to report
3425  * status back to the host.
3426  *
3427  * The special ring reports the status of received packets to the
3428  * host.  The chip does not write into the original descriptor the
3429  * RX buffer was obtained from.  The chip simply takes the original
3430  * descriptor as provided by the host, updates the status and length
3431  * field, then writes this into the next status ring entry.
3432  *
3433  * Each ring the host uses to post buffers to the chip is described
3434  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3435  * it is first placed into the on-chip ram.  When the packet's length
3436  * is known, it walks down the TG3_BDINFO entries to select the ring.
3437  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3438  * which is within the range of the new packet's length is chosen.
3439  *
3440  * The "separate ring for rx status" scheme may sound queer, but it makes
3441  * sense from a cache coherency perspective.  If only the host writes
3442  * to the buffer post rings, and only the chip writes to the rx status
3443  * rings, then cache lines never move beyond shared-modified state.
3444  * If both the host and chip were to write into the same ring, cache line
3445  * eviction could occur since both entities want it in an exclusive state.
3446  */
/* Receive up to @budget packets from the RX return ring, replenishing
 * or recycling producer ring buffers as we go.  Returns the number of
 * packets delivered to the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring and
		 * slot this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* Large packets: hand the mapped buffer straight up and
		 * post a fresh one.  Small packets: copy into a new skb
		 * and recycle the original buffer.
		 */
		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* Reserve 2 bytes to align the IP header. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Tell the chip about new std buffers periodically so it
		 * does not run dry within a long poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3602
/* One round of NAPI work: service link-change events, reap TX
 * completions, then receive up to (budget - work_done) packets.
 * Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* Bail out early if TX recovery was triggered. */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3636
/* NAPI poll callback.  Loops doing work until either the budget is
 * exhausted or no work remains, then re-enables interrupts.  TX
 * recovery aborts the loop and schedules the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3677
/* Prevent the IRQ handlers from scheduling new NAPI work and wait for
 * any handler already running on another CPU to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting out in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3687
/* Nonzero while IRQ processing is quiesced; checked by the interrupt
 * handlers before scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3692
3693 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3694  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3695  * with as well.  Most of the time, this is not necessary except when
3696  * shutting down the device.
3697  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	/* Optionally also quiesce the IRQ handler (see comment above). */
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3704
/* Counterpart to tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3709
3710 /* One-shot MSI handler - Chip automatically disables interrupt
3711  * after sending MSI so driver doesn't have to do it.
3712  */
3713 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3714 {
3715         struct net_device *dev = dev_id;
3716         struct tg3 *tp = netdev_priv(dev);
3717
3718         prefetch(tp->hw_status);
3719         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3720
3721         if (likely(!tg3_irq_sync(tp)))
3722                 netif_rx_schedule(dev, &tp->napi);
3723
3724         return IRQ_HANDLED;
3725 }
3726
3727 /* MSI ISR - No need to check for interrupt sharing and no need to
3728  * flush status block and interrupt mailbox. PCI ordering rules
3729  * guarantee that MSI will arrive after the status block.
3730  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	/* MSI is never shared, so the interrupt is always ours. */
	return IRQ_RETVAL(1);
}
3751
/* INTx interrupt handler for chips without tagged status. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3800
/* INTx interrupt handler for chips using tagged status blocks. */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3848
3849 /* ISR for interrupt test */
3850 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3851 {
3852         struct net_device *dev = dev_id;
3853         struct tg3 *tp = netdev_priv(dev);
3854         struct tg3_hw_status *sblk = tp->hw_status;
3855
3856         if ((sblk->status & SD_STATUS_UPDATED) ||
3857             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3858                 tg3_disable_ints(tp);
3859                 return IRQ_RETVAL(1);
3860         }
3861         return IRQ_RETVAL(0);
3862 }
3863
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
                       "aborting.\n", tp->dev->name);
                /* Init failed: shut the chip down and take the interface
                 * down.  The lock is dropped around dev_close() —
                 * presumably because dev_close() may sleep / re-acquire
                 * it (NOTE(review): confirm) — and re-taken before
                 * returning to honor the caller's locking contract.
                 */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                napi_enable(&tp->napi);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
3888
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling "interrupt" for netconsole and similar tools: invoke the
 * regular ISR by hand with the device's assigned IRQ number.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3897
/* Deferred chip-reset worker (scheduled from tg3_tx_timeout() and other
 * error paths).  Runs in process context from the workqueue.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        /* Device went down while the work was queued; nothing to do. */
        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        /* The lock is dropped around tg3_netif_stop() — presumably it
         * can sleep (NOTE(review): confirm against tg3_netif_stop).
         */
        tg3_full_unlock(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        /* Consume the one-shot restart-timer request, if any. */
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                /* TX recovery: switch the mailbox accessors to the
                 * flushing variants and note the reorder workaround.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        if (tg3_init_hw(tp, 1))
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);
}
3938
/* Log a minimal MAC/DMA status register snapshot for post-mortem
 * debugging of TX timeouts.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
        printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
        printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
               tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3946
3947 static void tg3_tx_timeout(struct net_device *dev)
3948 {
3949         struct tg3 *tp = netdev_priv(dev);
3950
3951         if (netif_msg_tx_err(tp)) {
3952                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3953                        dev->name);
3954                 tg3_dump_short_state(tp);
3955         }
3956
3957         schedule_work(&tp->reset_task);
3958 }
3959
3960 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3961 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3962 {
3963         u32 base = (u32) mapping & 0xffffffff;
3964
3965         return ((base > 0xffffdcc0) &&
3966                 (base + len + 8 < base));
3967 }
3968
3969 /* Test for DMA addresses > 40-bit */
3970 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3971                                           int len)
3972 {
3973 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3974         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3975                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3976         return 0;
3977 #else
3978         return 0;
3979 #endif
3980 }
3981
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        /* Copy the packet into a fresh linear skb so it can be
         * described by a single descriptor at a safe DMA address.
         */
        struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries. */
        /* Walk the descriptors the caller already filled for the old
         * skb and unmap each one: slot 0 holds the linear head
         * (skb_headlen), subsequent slots hold the page fragments.
         */
        i = 0;
        while (entry != last_plus_one) {
                int len;

                if (i == 0)
                        len = skb_headlen(skb);
                else
                        len = skb_shinfo(skb)->frags[i-1].size;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        /* The first slot takes ownership of the
                         * replacement skb (NULL if the workaround failed).
                         */
                        tp->tx_buffers[entry].skb = new_skb;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        dev_kfree_skb(skb);

        return ret;
}
4041
4042 static void tg3_set_txd(struct tg3 *tp, int entry,
4043                         dma_addr_t mapping, int len, u32 flags,
4044                         u32 mss_and_is_end)
4045 {
4046         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4047         int is_end = (mss_and_is_end & 0x1);
4048         u32 mss = (mss_and_is_end >> 1);
4049         u32 vlan_tag = 0;
4050
4051         if (is_end)
4052                 flags |= TXD_FLAG_END;
4053         if (flags & TXD_FLAG_VLAN) {
4054                 vlan_tag = flags >> 16;
4055                 flags &= 0xffff;
4056         }
4057         vlan_tag |= (mss << TXD_MSS_SHIFT);
4058
4059         txd->addr_hi = ((u64) mapping >> 32);
4060         txd->addr_lo = ((u64) mapping & 0xffffffff);
4061         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4062         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4063 }
4064
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* The IP/TCP headers are modified in place below, so
                 * they must be private to this skb.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Zero the IP checksum and pre-set tot_len to
                         * the per-segment length for the hardware.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        /* Header length is encoded in the upper bits of
                         * the mss descriptor field.
                         */
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the slot holding the head owns the skb. */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                /* Stop before the ring can overflow, but re-wake at once
                 * if reclaim already freed enough slots.
                 */
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4183
4184 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4185
4186 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4187  * TSO header is greater than 80 bytes.
4188  */
4189 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4190 {
4191         struct sk_buff *segs, *nskb;
4192
4193         /* Estimate the number of fragments in the worst case */
4194         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4195                 netif_stop_queue(tp->dev);
4196                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4197                         return NETDEV_TX_BUSY;
4198
4199                 netif_wake_queue(tp->dev);
4200         }
4201
4202         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4203         if (unlikely(IS_ERR(segs)))
4204                 goto tg3_tso_bug_end;
4205
4206         do {
4207                 nskb = segs;
4208                 segs = segs->next;
4209                 nskb->next = NULL;
4210                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4211         } while (segs);
4212
4213 tg3_tso_bug_end:
4214         dev_kfree_skb(skb);
4215
4216         return NETDEV_TX_OK;
4217 }
4218
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers are modified in place below, so they must be
                 * private to this skb.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Chips with the TSO bug cannot handle headers longer
                 * than 80 bytes; punt those packets to software GSO.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* Hardware TSO computes the TCP checksum itself. */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants the pseudo-header checksum
                         * seeded in the TCP header.
                         */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* IP/TCP option lengths go in different descriptor
                 * fields depending on the chip variant.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        /* Track whether any mapping trips the 4G / 40-bit errata; the
         * workaround runs after all descriptors are filled.
         */
        would_hit_hwbug = 0;

        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor this packet used and
                 * let the workaround relinearize it at a safe address.
                 */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4392
4393 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4394                                int new_mtu)
4395 {
4396         dev->mtu = new_mtu;
4397
4398         if (new_mtu > ETH_DATA_LEN) {
4399                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4400                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4401                         ethtool_op_set_tso(dev, 0);
4402                 }
4403                 else
4404                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4405         } else {
4406                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4407                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4408                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4409         }
4410 }
4411
/* net_device change_mtu hook: validate the requested MTU and, when the
 * interface is up, perform a full halt/restart so the new ring
 * configuration takes effect.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        /* Only bring the interface back if the hardware came up. */
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return err;
}
4445
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard RX ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo RX ring: same cleanup with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: each skb occupies one slot for its linear head plus
         * one slot per page fragment, so i advances across all of them.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        /* Fragment slots may wrap past the ring end. */
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4517
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips running a jumbo MTU use jumbo-sized buffers
         * on the standard ring instead of a separate jumbo ring.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        /* Partial allocation is tolerated by shrinking
                         * the ring; an empty standard ring is fatal.
                         */
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        /* Undo the std-ring allocations. */
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4607
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_std_buffers is a single kzalloc block that also carries
         * the jumbo ring_info and tx_ring_info arrays (see
         * tg3_alloc_consistent), so one kfree releases all of the
         * software ring bookkeeping.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        /* Each DMA-consistent area is freed and its pointer cleared so
         * a repeat call is harmless.
         */
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
4647
4648 /*
4649  * Must not be invoked with interrupt sources disabled and
4650  * the hardware shutdown down.  Can sleep.
4651  */
4652 static int tg3_alloc_consistent(struct tg3 *tp)
4653 {
4654         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4655                                       (TG3_RX_RING_SIZE +
4656                                        TG3_RX_JUMBO_RING_SIZE)) +
4657                                      (sizeof(struct tx_ring_info) *
4658                                       TG3_TX_RING_SIZE),
4659                                      GFP_KERNEL);
4660         if (!tp->rx_std_buffers)
4661                 return -ENOMEM;
4662
4663         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4664         tp->tx_buffers = (struct tx_ring_info *)
4665                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4666
4667         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4668                                           &tp->rx_std_mapping);
4669         if (!tp->rx_std)
4670                 goto err_out;
4671
4672         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4673                                             &tp->rx_jumbo_mapping);
4674
4675         if (!tp->rx_jumbo)
4676                 goto err_out;
4677
4678         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4679                                           &tp->rx_rcb_mapping);
4680         if (!tp->rx_rcb)
4681                 goto err_out;
4682
4683         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4684                                            &tp->tx_desc_mapping);
4685         if (!tp->tx_ring)
4686                 goto err_out;
4687
4688         tp->hw_status = pci_alloc_consistent(tp->pdev,
4689                                              TG3_HW_STATUS_SIZE,
4690                                              &tp->status_mapping);
4691         if (!tp->hw_status)
4692                 goto err_out;
4693
4694         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4695                                             sizeof(struct tg3_hw_stats),
4696                                             &tp->stats_mapping);
4697         if (!tp->hw_stats)
4698                 goto err_out;
4699
4700         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4701         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4702
4703         return 0;
4704
4705 err_out:
4706         tg3_free_consistent(tp);
4707         return -ENOMEM;
4708 }
4709
4710 #define MAX_WAIT_CNT 1000
4711
4712 /* To stop a block, clear the enable bit and poll till it
4713  * clears.  tp->lock is held.
4714  */
4715 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4716 {
4717         unsigned int i;
4718         u32 val;
4719
4720         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4721                 switch (ofs) {
4722                 case RCVLSC_MODE:
4723                 case DMAC_MODE:
4724                 case MBFREE_MODE:
4725                 case BUFMGR_MODE:
4726                 case MEMARB_MODE:
4727                         /* We can't enable/disable these bits of the
4728                          * 5705/5750, just say success.
4729                          */
4730                         return 0;
4731
4732                 default:
4733                         break;
4734                 };
4735         }
4736
4737         val = tr32(ofs);
4738         val &= ~enable_bit;
4739         tw32_f(ofs, val);
4740
4741         for (i = 0; i < MAX_WAIT_CNT; i++) {
4742                 udelay(100);
4743                 val = tr32(ofs);
4744                 if ((val & enable_bit) == 0)
4745                         break;
4746         }
4747
4748         if (i == MAX_WAIT_CNT && !silent) {
4749                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4750                        "ofs=%lx enable_bit=%x\n",
4751                        ofs, enable_bit);
4752                 return -ENODEV;
4753         }
4754
4755         return 0;
4756 }
4757
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop the receive MAC first so no new frames enter the chip. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Shut down the RX-side blocks.  Errors are OR'ed together so
         * every block is attempted even if an earlier one timed out.
         */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Now the TX-side and DMA blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        /* Disable the MAC transmit data engine. */
        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Disable the transmit MAC and poll (up to MAX_WAIT_CNT * 100us
         * = 100ms) for the enable bit to clear.
         */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        /* Host coalescing, write-DMA and mbuf-free blocks. */
        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset register to flush the on-chip queues. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear status block and statistics so stale data is not seen
         * after the hardware is reinitialized.
         */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4820
4821 /* tp->lock is held. */
4822 static int tg3_nvram_lock(struct tg3 *tp)
4823 {
4824         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4825                 int i;
4826
4827                 if (tp->nvram_lock_cnt == 0) {
4828                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4829                         for (i = 0; i < 8000; i++) {
4830                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4831                                         break;
4832                                 udelay(20);
4833                         }
4834                         if (i == 8000) {
4835                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4836                                 return -ENODEV;
4837                         }
4838                 }
4839                 tp->nvram_lock_cnt++;
4840         }
4841         return 0;
4842 }
4843
4844 /* tp->lock is held. */
4845 static void tg3_nvram_unlock(struct tg3 *tp)
4846 {
4847         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4848                 if (tp->nvram_lock_cnt > 0)
4849                         tp->nvram_lock_cnt--;
4850                 if (tp->nvram_lock_cnt == 0)
4851                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4852         }
4853 }
4854
4855 /* tp->lock is held. */
4856 static void tg3_enable_nvram_access(struct tg3 *tp)
4857 {
4858         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4859             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4860                 u32 nvaccess = tr32(NVRAM_ACCESS);
4861
4862                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4863         }
4864 }
4865
4866 /* tp->lock is held. */
4867 static void tg3_disable_nvram_access(struct tg3 *tp)
4868 {
4869         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4870             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4871                 u32 nvaccess = tr32(NVRAM_ACCESS);
4872
4873                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4874         }
4875 }
4876
/* Post @event to the APE management firmware.  Silently returns if the
 * APE is absent, not ready, or still busy with a previous event.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* Bail out unless the APE firmware has published its signature
         * and reports ready status.
         */
        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (apedata != APE_FW_STATUS_READY)
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Only post the new event once no previous event is
                 * still pending; done under the APE memory lock.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* Ring the doorbell only if the event was actually queued above. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4912
4913 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4914 {
4915         u32 event;
4916         u32 apedata;
4917
4918         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4919                 return;
4920
4921         switch (kind) {
4922                 case RESET_KIND_INIT:
4923                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4924                                         APE_HOST_SEG_SIG_MAGIC);
4925                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4926                                         APE_HOST_SEG_LEN_MAGIC);
4927                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4928                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4929                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4930                                         APE_HOST_DRIVER_ID_MAGIC);
4931                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4932                                         APE_HOST_BEHAV_NO_PHYLOCK);
4933
4934                         event = APE_EVENT_STATUS_STATE_START;
4935                         break;
4936                 case RESET_KIND_SHUTDOWN:
4937                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4938                         break;
4939                 case RESET_KIND_SUSPEND:
4940                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4941                         break;
4942                 default:
4943                         return;
4944         }
4945
4946         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4947
4948         tg3_ape_send_event(tp, event);
4949 }
4950
4951 /* tp->lock is held. */
4952 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4953 {
4954         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4955                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4956
4957         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4958                 switch (kind) {
4959                 case RESET_KIND_INIT:
4960                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4961                                       DRV_STATE_START);
4962                         break;
4963
4964                 case RESET_KIND_SHUTDOWN:
4965                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4966                                       DRV_STATE_UNLOAD);
4967                         break;
4968
4969                 case RESET_KIND_SUSPEND:
4970                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4971                                       DRV_STATE_SUSPEND);
4972                         break;
4973
4974                 default:
4975                         break;
4976                 };
4977         }
4978
4979         if (kind == RESET_KIND_INIT ||
4980             kind == RESET_KIND_SUSPEND)
4981                 tg3_ape_driver_state_change(tp, kind);
4982 }
4983
4984 /* tp->lock is held. */
4985 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4986 {
4987         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4988                 switch (kind) {
4989                 case RESET_KIND_INIT:
4990                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4991                                       DRV_STATE_START_DONE);
4992                         break;
4993
4994                 case RESET_KIND_SHUTDOWN:
4995                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4996                                       DRV_STATE_UNLOAD_DONE);
4997                         break;
4998
4999                 default:
5000                         break;
5001                 };
5002         }
5003
5004         if (kind == RESET_KIND_SHUTDOWN)
5005                 tg3_ape_driver_state_change(tp, kind);
5006 }
5007
5008 /* tp->lock is held. */
5009 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5010 {
5011         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5012                 switch (kind) {
5013                 case RESET_KIND_INIT:
5014                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5015                                       DRV_STATE_START);
5016                         break;
5017
5018                 case RESET_KIND_SHUTDOWN:
5019                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5020                                       DRV_STATE_UNLOAD);
5021                         break;
5022
5023                 case RESET_KIND_SUSPEND:
5024                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5025                                       DRV_STATE_SUSPEND);
5026                         break;
5027
5028                 default:
5029                         break;
5030                 };
5031         }
5032 }
5033
/* Wait for on-chip firmware to finish initializing after a reset.
 * Returns 0 on success or when no firmware is fitted (reported once),
 * -ENODEV only on 5906 VCPU init timeout.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete.  The firmware
         * signals completion by placing the one's complement of the
         * magic value in the mailbox; poll up to 100000 * 10us = 1s.
         */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 &&
            !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
                tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

                printk(KERN_INFO PFX "%s: No firmware running.\n",
                       tp->dev->name);
        }

        return 0;
}
5072
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
        /* The core-clock reset in tg3_chip_reset() clears the memory
         * enable bit in PCI_COMMAND, so stash the register for
         * tg3_restore_pci_state() to put back afterwards.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5078
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Put back the command register saved by tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        /* PCIe: raise the max read request size to 4096 bytes.
         * Legacy PCI/PCI-X: restore cacheline size and latency timer.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
                pcie_set_readrq(tp->pdev, 4096);
        else {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
5140
5141 static void tg3_stop_fw(struct tg3 *);
5142
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip, then restore enough
 * PCI/register state for the device to be reprogrammed.  Returns 0 on
 * success or the error from tg3_poll_fw() if firmware never comes up.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        /* Zero the fastboot program counter on chips that have one. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* NOTE(review): 0x7e2c and bit 29 below are raw,
                 * undocumented PCIe workaround registers/bits.
                 */
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        /* 5906: flag a driver reset to the VCPU and un-halt it. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        /* NOTE(review): config offset 0xc4 bit 15 is a
                         * raw, undocumented 5750-A0 workaround.
                         */
                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        /* Safe for the irq handler to touch the chip again. */
        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        /* Re-enable the memory arbiter; on 5780-class parts preserve
         * the arbiter's other mode bits.
         */
        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                /* NOTE(review): register 0xc4 bit 15 — undocumented
                 * 5705-A0 workaround.
                 */
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Reprogram the MAC port mode to match the PHY type. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        /* Wait for the bootcode/firmware to finish initializing. */
        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                /* NOTE(review): register 0x7c00 bit 25 — undocumented
                 * PCIe post-reset fixup.
                 */
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5331
/* tp->lock is held.
 *
 * Ask the ASF firmware to pause.  Only applies when ASF is enabled and
 * the APE is not managing the device.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
           !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                u32 val;
                int i;

                /* Post the PAUSE_FW command, then raise bit 14 of the
                 * RX CPU event register to signal the firmware.
                 */
                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
                val = tr32(GRC_RX_CPU_EVENT);
                val |= (1 << 14);
                tw32(GRC_RX_CPU_EVENT, val);

                /* Wait for RX cpu to ACK the event.  */
                for (i = 0; i < 100; i++) {
                        /* The firmware acks by clearing bit 14. */
                        if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
                                break;
                        udelay(1);
                }
        }
}
5353
/* tp->lock is held.
 *
 * Bring the chip down: pause firmware, announce the reset, stop all
 * hardware blocks, reset the core, and report the resulting driver
 * state.  Returns the error from tg3_chip_reset(), or 0.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        /* Quiesce the ASF firmware before touching the hardware. */
        tg3_stop_fw(tp);

        /* Announce the impending reset. */
        tg3_write_sig_pre_reset(tp, kind);

        /* Stop every DMA/MAC block, then reset the chip core. */
        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        /* Report post-reset driver state via both signalling paths. */
        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return err;
}
5374
5375 #define TG3_FW_RELEASE_MAJOR    0x0
5376 #define TG3_FW_RELASE_MINOR     0x0
5377 #define TG3_FW_RELEASE_FIX      0x0
5378 #define TG3_FW_START_ADDR       0x08000000
5379 #define TG3_FW_TEXT_ADDR        0x08000000
5380 #define TG3_FW_TEXT_LEN         0x9c0
5381 #define TG3_FW_RODATA_ADDR      0x080009c0
5382 #define TG3_FW_RODATA_LEN       0x60
5383 #define TG3_FW_DATA_ADDR        0x08000a40
5384 #define TG3_FW_DATA_LEN         0x20
5385 #define TG3_FW_SBSS_ADDR        0x08000a60
5386 #define TG3_FW_SBSS_LEN         0xc
5387 #define TG3_FW_BSS_ADDR         0x08000a70
5388 #define TG3_FW_BSS_LEN          0x10
5389
/* Firmware .text image for the 5701 A0 fix, loaded into CPU scratch
 * memory by tg3_load_5701_a0_firmware_fix().  Opaque machine-code
 * words (see the copyright/permission notice at the top of this
 * file) -- do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5483
/* Firmware .rodata image loaded alongside tg3FwText (the words appear
 * to encode short ASCII strings -- opaque data, do not edit by hand).
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5491
/* The .data section is all zeros, so it is compiled out; the loader
 * zero-fills it instead (tg3_load_5701_a0_firmware_fix() passes
 * data_data == NULL, which tg3_load_firmware_cpu() writes as zeros).
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5498
/* On-chip scratch memory regions that hold the RX and TX CPU firmware
 * images (passed to tg3_load_firmware_cpu() below).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
5503
5504 /* tp->lock is held. */
5505 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5506 {
5507         int i;
5508
5509         BUG_ON(offset == TX_CPU_BASE &&
5510             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5511
5512         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5513                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5514
5515                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5516                 return 0;
5517         }
5518         if (offset == RX_CPU_BASE) {
5519                 for (i = 0; i < 10000; i++) {
5520                         tw32(offset + CPU_STATE, 0xffffffff);
5521                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5522                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5523                                 break;
5524                 }
5525
5526                 tw32(offset + CPU_STATE, 0xffffffff);
5527                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5528                 udelay(10);
5529         } else {
5530                 for (i = 0; i < 10000; i++) {
5531                         tw32(offset + CPU_STATE, 0xffffffff);
5532                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5533                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5534                                 break;
5535                 }
5536         }
5537
5538         if (i >= 10000) {
5539                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5540                        "and %s CPU\n",
5541                        tp->dev->name,
5542                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5543                 return -ENODEV;
5544         }
5545
5546         /* Clear firmware's nvram arbitration. */
5547         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5548                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5549         return 0;
5550 }
5551
/* Describes one firmware image as three sections.  The *_base fields
 * are the image's link addresses and the *_len fields are byte
 * lengths.  A NULL *_data pointer means the section is zero-filled
 * when loaded (see tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* .text link address */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;
	unsigned int rodata_base;	/* .rodata link address */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;
	unsigned int data_base;		/* .data link address */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;
};
5563
5564 /* tp->lock is held. */
5565 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5566                                  int cpu_scratch_size, struct fw_info *info)
5567 {
5568         int err, lock_err, i;
5569         void (*write_op)(struct tg3 *, u32, u32);
5570
5571         if (cpu_base == TX_CPU_BASE &&
5572             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5573                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5574                        "TX cpu firmware on %s which is 5705.\n",
5575                        tp->dev->name);
5576                 return -EINVAL;
5577         }
5578
5579         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5580                 write_op = tg3_write_mem;
5581         else
5582                 write_op = tg3_write_indirect_reg32;
5583
5584         /* It is possible that bootcode is still loading at this point.
5585          * Get the nvram lock first before halting the cpu.
5586          */
5587         lock_err = tg3_nvram_lock(tp);
5588         err = tg3_halt_cpu(tp, cpu_base);
5589         if (!lock_err)
5590                 tg3_nvram_unlock(tp);
5591         if (err)
5592                 goto out;
5593
5594         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5595                 write_op(tp, cpu_scratch_base + i, 0);
5596         tw32(cpu_base + CPU_STATE, 0xffffffff);
5597         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5598         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5599                 write_op(tp, (cpu_scratch_base +
5600                               (info->text_base & 0xffff) +
5601                               (i * sizeof(u32))),
5602                          (info->text_data ?
5603                           info->text_data[i] : 0));
5604         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5605                 write_op(tp, (cpu_scratch_base +
5606                               (info->rodata_base & 0xffff) +
5607                               (i * sizeof(u32))),
5608                          (info->rodata_data ?
5609                           info->rodata_data[i] : 0));
5610         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5611                 write_op(tp, (cpu_scratch_base +
5612                               (info->data_base & 0xffff) +
5613                               (i * sizeof(u32))),
5614                          (info->data_data ?
5615                           info->data_data[i] : 0));
5616
5617         err = 0;
5618
5619 out:
5620         return err;
5621 }
5622
5623 /* tp->lock is held. */
5624 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5625 {
5626         struct fw_info info;
5627         int err, i;
5628
5629         info.text_base = TG3_FW_TEXT_ADDR;
5630         info.text_len = TG3_FW_TEXT_LEN;
5631         info.text_data = &tg3FwText[0];
5632         info.rodata_base = TG3_FW_RODATA_ADDR;
5633         info.rodata_len = TG3_FW_RODATA_LEN;
5634         info.rodata_data = &tg3FwRodata[0];
5635         info.data_base = TG3_FW_DATA_ADDR;
5636         info.data_len = TG3_FW_DATA_LEN;
5637         info.data_data = NULL;
5638
5639         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5640                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5641                                     &info);
5642         if (err)
5643                 return err;
5644
5645         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5646                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5647                                     &info);
5648         if (err)
5649                 return err;
5650
5651         /* Now startup only the RX cpu. */
5652         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5653         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5654
5655         for (i = 0; i < 5; i++) {
5656                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5657                         break;
5658                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5659                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5660                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5661                 udelay(1000);
5662         }
5663         if (i >= 5) {
5664                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5665                        "to set RX CPU PC, is %08x should be %08x\n",
5666                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5667                        TG3_FW_TEXT_ADDR);
5668                 return -ENODEV;
5669         }
5670         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5671         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5672
5673         return 0;
5674 }
5675
5676
/* Layout of the TSO firmware image: link addresses and byte lengths
 * of each section.  The tg3TsoFwText array below is sized from
 * TG3_TSO_FW_TEXT_LEN.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6     /* sic: "RELASE" misspelling kept for compatibility */
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5691
5692 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5693         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5694         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5695         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5696         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5697         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5698         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5699         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5700         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5701         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5702         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5703         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5704         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5705         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5706         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5707         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5708         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5709         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5710         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5711         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5712         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5713         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5714         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5715         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5716         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5717         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5718         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5719         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5720         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5721         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5722         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5723         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5724         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5725         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5726         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5727         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5728         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5729         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5730         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5731         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5732         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5733         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5734         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5735         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5736         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5737         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5738         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5739         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5740         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5741         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5742         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5743         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5744         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5745         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5746         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5747         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5748         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5749         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5750         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5751         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5752         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5753         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5754         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5755         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5756         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5757         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5758         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5759         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5760         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5761         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5762         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5763         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5764         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5765         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5766         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5767         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5768         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5769         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5770         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5771         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5772         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5773         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5774         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5775         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5776         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5777         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5778         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5779         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5780         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5781         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5782         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5783         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5784         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5785         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5786         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5787         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5788         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5789         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5790         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5791         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5792         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5793         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5794         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5795         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5796         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5797         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5798         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5799         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5800         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5801         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5802         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5803         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5804         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5805         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5806         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5807         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5808         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5809         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5810         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5811         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5812         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5813         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5814         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5815         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5816         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5817         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5818         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5819         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5820         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5821         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5822         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5823         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5824         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5825         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5826         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5827         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5828         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5829         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5830         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5831         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5832         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5833         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5834         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5835         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5836         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5837         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5838         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5839         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5840         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5841         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5842         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5843         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5844         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5845         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5846         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5847         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5848         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5849         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5850         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5851         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5852         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5853         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5854         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5855         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5856         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5857         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5858         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5859         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5860         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5861         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5862         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5863         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5864         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5865         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5866         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5867         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5868         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5869         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5870         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5871         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5872         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5873         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5874         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5875         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5876         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5877         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5878         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5879         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5880         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5881         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5882         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5883         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5884         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5885         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5886         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5887         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5888         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5889         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5890         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5891         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5892         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5893         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5894         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5895         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5896         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5897         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5898         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5899         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5900         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5901         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5902         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5903         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5904         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5905         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5906         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5907         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5908         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5909         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5910         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5911         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5912         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5913         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5914         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5915         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5916         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5917         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5918         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5919         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5920         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5921         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5922         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5923         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5924         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5925         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5926         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5927         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5928         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5929         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5930         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5931         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5932         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5933         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5934         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5935         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5936         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5937         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5938         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5939         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5940         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5941         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5942         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5943         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5944         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5945         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5946         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5947         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5948         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5949         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5950         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5951         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5952         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5953         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5954         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5955         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5956         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5957         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5958         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5959         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5960         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5961         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5962         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5963         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5964         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5965         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5966         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5967         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5968         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5969         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5970         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5971         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5972         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5973         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5974         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5975         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5976         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5977 };
5978
/* Read-only data segment of the standard (non-5705) TSO firmware image.
 * The words are zero-padded ASCII debug strings ("MainCpuB", "MainCpuA",
 * "stkoffldIn", "stkoff**", "SwEvent0", "fatalErr"); loaded into NIC SRAM
 * at TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5986
/* Initialized data segment of the standard TSO firmware; carries the
 * firmware version string "stkoffld_v1.6.0" followed by zero fill.
 * Loaded at TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5992
/* 5705 needs a special version of the TSO firmware.  */
/* Firmware version 1.2.0 — matches the "stkoffld_v1.2.0" string embedded
 * in tg3Tso5FwData below. */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2     /* sic: "RELASE" — historical typo; renaming could break out-of-view references */
#define TG3_TSO5_FW_RELEASE_FIX         0x0
/* Link/load layout of the 5705 TSO firmware image in NIC SRAM.
 * The *_LEN values are byte counts; the text/rodata/data blobs below are
 * stored as arrays of 32-bit words.  SBSS/BSS have no initializer data —
 * only their address/length are needed to size the scratch region
 * (see tg3_load_tso_firmware()). */
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6008
/* Text (instruction) segment of the 5705-specific TSO firmware, stored as
 * 32-bit words of embedded-CPU machine code (MIPS-style encoding — TODO
 * confirm against Broadcom toolchain).  Loaded into NIC SRAM at
 * TG3_TSO5_FW_TEXT_ADDR and started on the RX CPU by
 * tg3_load_tso_firmware().  Generated data — do not edit by hand; see the
 * firmware copyright notice at the top of this file.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6167
/* Read-only data segment of the 5705 TSO firmware.  Zero-padded ASCII
 * debug strings ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr"); loaded
 * at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6174
/* Initialized data segment of the 5705 TSO firmware; carries the version
 * string "stkoffld_v1.2.0", consistent with the TG3_TSO5_FW_RELEASE_*
 * constants above.  Loaded at TG3_TSO5_FW_DATA_ADDR.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6179
6180 /* tp->lock is held. */
6181 static int tg3_load_tso_firmware(struct tg3 *tp)
6182 {
6183         struct fw_info info;
6184         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6185         int err, i;
6186
6187         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6188                 return 0;
6189
6190         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6191                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6192                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6193                 info.text_data = &tg3Tso5FwText[0];
6194                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6195                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6196                 info.rodata_data = &tg3Tso5FwRodata[0];
6197                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6198                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6199                 info.data_data = &tg3Tso5FwData[0];
6200                 cpu_base = RX_CPU_BASE;
6201                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6202                 cpu_scratch_size = (info.text_len +
6203                                     info.rodata_len +
6204                                     info.data_len +
6205                                     TG3_TSO5_FW_SBSS_LEN +
6206                                     TG3_TSO5_FW_BSS_LEN);
6207         } else {
6208                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6209                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6210                 info.text_data = &tg3TsoFwText[0];
6211                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6212                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6213                 info.rodata_data = &tg3TsoFwRodata[0];
6214                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6215                 info.data_len = TG3_TSO_FW_DATA_LEN;
6216                 info.data_data = &tg3TsoFwData[0];
6217                 cpu_base = TX_CPU_BASE;
6218                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6219                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6220         }
6221
6222         err = tg3_load_firmware_cpu(tp, cpu_base,
6223                                     cpu_scratch_base, cpu_scratch_size,
6224                                     &info);
6225         if (err)
6226                 return err;
6227
6228         /* Now startup the cpu. */
6229         tw32(cpu_base + CPU_STATE, 0xffffffff);
6230         tw32_f(cpu_base + CPU_PC,    info.text_base);
6231
6232         for (i = 0; i < 5; i++) {
6233                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6234                         break;
6235                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6236                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6237                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6238                 udelay(1000);
6239         }
6240         if (i >= 5) {
6241                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6242                        "to set CPU PC, is %08x should be %08x\n",
6243                        tp->dev->name, tr32(cpu_base + CPU_PC),
6244                        info.text_base);
6245                 return -ENODEV;
6246         }
6247         tw32(cpu_base + CPU_STATE, 0xffffffff);
6248         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6249         return 0;
6250 }
6251
6252
6253 /* tp->lock is held. */
6254 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6255 {
6256         u32 addr_high, addr_low;
6257         int i;
6258
6259         addr_high = ((tp->dev->dev_addr[0] << 8) |
6260                      tp->dev->dev_addr[1]);
6261         addr_low = ((tp->dev->dev_addr[2] << 24) |
6262                     (tp->dev->dev_addr[3] << 16) |
6263                     (tp->dev->dev_addr[4] <<  8) |
6264                     (tp->dev->dev_addr[5] <<  0));
6265         for (i = 0; i < 4; i++) {
6266                 if (i == 1 && skip_mac_1)
6267                         continue;
6268                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6269                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6270         }
6271
6272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6273             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6274                 for (i = 0; i < 12; i++) {
6275                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6276                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6277                 }
6278         }
6279
6280         addr_high = (tp->dev->dev_addr[0] +
6281                      tp->dev->dev_addr[1] +
6282                      tp->dev->dev_addr[2] +
6283                      tp->dev->dev_addr[3] +
6284                      tp->dev->dev_addr[4] +
6285                      tp->dev->dev_addr[5]) &
6286                 TX_BACKOFF_SEED_MASK;
6287         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6288 }
6289
6290 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6291 {
6292         struct tg3 *tp = netdev_priv(dev);
6293         struct sockaddr *addr = p;
6294         int err = 0, skip_mac_1 = 0;
6295
6296         if (!is_valid_ether_addr(addr->sa_data))
6297                 return -EINVAL;
6298
6299         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6300
6301         if (!netif_running(dev))
6302                 return 0;
6303
6304         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6305                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6306
6307                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6308                 addr0_low = tr32(MAC_ADDR_0_LOW);
6309                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6310                 addr1_low = tr32(MAC_ADDR_1_LOW);
6311
6312                 /* Skip MAC addr 1 if ASF is using it. */
6313                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6314                     !(addr1_high == 0 && addr1_low == 0))
6315                         skip_mac_1 = 1;
6316         }
6317         spin_lock_bh(&tp->lock);
6318         __tg3_set_mac_addr(tp, skip_mac_1);
6319         spin_unlock_bh(&tp->lock);
6320
6321         return err;
6322 }
6323
6324 /* tp->lock is held. */
6325 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6326                            dma_addr_t mapping, u32 maxlen_flags,
6327                            u32 nic_addr)
6328 {
6329         tg3_write_mem(tp,
6330                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6331                       ((u64) mapping >> 32));
6332         tg3_write_mem(tp,
6333                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6334                       ((u64) mapping & 0xffffffff));
6335         tg3_write_mem(tp,
6336                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6337                        maxlen_flags);
6338
6339         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6340                 tg3_write_mem(tp,
6341                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6342                               nic_addr);
6343 }
6344
6345 static void __tg3_set_rx_mode(struct net_device *);
6346 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6347 {
6348         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6349         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6350         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6351         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6352         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6353                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6354                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6355         }
6356         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6357         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6358         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6359                 u32 val = ec->stats_block_coalesce_usecs;
6360
6361                 if (!netif_carrier_ok(tp->dev))
6362                         val = 0;
6363
6364                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6365         }
6366 }
6367
6368 /* tp->lock is held. */
6369 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6370 {
6371         u32 val, rdmac_mode;
6372         int i, err, limit;
6373
6374         tg3_disable_ints(tp);
6375
6376         tg3_stop_fw(tp);
6377
6378         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6379
6380         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6381                 tg3_abort_hw(tp, 1);
6382         }
6383
6384         if (reset_phy)
6385                 tg3_phy_reset(tp);
6386
6387         err = tg3_chip_reset(tp);
6388         if (err)
6389                 return err;
6390
6391         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6392
6393         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6394                 val = tr32(TG3_CPMU_CTRL);
6395                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6396                 tw32(TG3_CPMU_CTRL, val);
6397
6398                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6399                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6400                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6401                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6402
6403                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6404                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6405                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6406                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6407
6408                 val = tr32(TG3_CPMU_HST_ACC);
6409                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6410                 val |= CPMU_HST_ACC_MACCLK_6_25;
6411                 tw32(TG3_CPMU_HST_ACC, val);
6412         }
6413
6414         /* This works around an issue with Athlon chipsets on
6415          * B3 tigon3 silicon.  This bit has no effect on any
6416          * other revision.  But do not set this on PCI Express
6417          * chips and don't even touch the clocks if the CPMU is present.
6418          */
6419         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6420                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6421                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6422                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6423         }
6424
6425         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6426             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6427                 val = tr32(TG3PCI_PCISTATE);
6428                 val |= PCISTATE_RETRY_SAME_DMA;
6429                 tw32(TG3PCI_PCISTATE, val);
6430         }
6431
6432         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6433                 /* Allow reads and writes to the
6434                  * APE register and memory space.
6435                  */
6436                 val = tr32(TG3PCI_PCISTATE);
6437                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6438                        PCISTATE_ALLOW_APE_SHMEM_WR;
6439                 tw32(TG3PCI_PCISTATE, val);
6440         }
6441
6442         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6443                 /* Enable some hw fixes.  */
6444                 val = tr32(TG3PCI_MSI_DATA);
6445                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6446                 tw32(TG3PCI_MSI_DATA, val);
6447         }
6448
6449         /* Descriptor ring init may make accesses to the
6450          * NIC SRAM area to setup the TX descriptors, so we
6451          * can only do this after the hardware has been
6452          * successfully reset.
6453          */
6454         err = tg3_init_rings(tp);
6455         if (err)
6456                 return err;
6457
6458         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6459             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6460                 /* This value is determined during the probe time DMA
6461                  * engine test, tg3_test_dma.
6462                  */
6463                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6464         }
6465
6466         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6467                           GRC_MODE_4X_NIC_SEND_RINGS |
6468                           GRC_MODE_NO_TX_PHDR_CSUM |
6469                           GRC_MODE_NO_RX_PHDR_CSUM);
6470         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6471
6472         /* Pseudo-header checksum is done by hardware logic and not
6473          * the offload processers, so make the chip do the pseudo-
6474          * header checksums on receive.  For transmit it is more
6475          * convenient to do the pseudo-header checksum in software
6476          * as Linux does that on transmit for us in all cases.
6477          */
6478         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6479
6480         tw32(GRC_MODE,
6481              tp->grc_mode |
6482              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6483
6484         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6485         val = tr32(GRC_MISC_CFG);
6486         val &= ~0xff;
6487         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6488         tw32(GRC_MISC_CFG, val);
6489
6490         /* Initialize MBUF/DESC pool. */
6491         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6492                 /* Do nothing.  */
6493         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6494                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6495                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6496                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6497                 else
6498                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6499                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6500                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6501         }
6502         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6503                 int fw_len;
6504
6505                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6506                           TG3_TSO5_FW_RODATA_LEN +
6507                           TG3_TSO5_FW_DATA_LEN +
6508                           TG3_TSO5_FW_SBSS_LEN +
6509                           TG3_TSO5_FW_BSS_LEN);
6510                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6511                 tw32(BUFMGR_MB_POOL_ADDR,
6512                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6513                 tw32(BUFMGR_MB_POOL_SIZE,
6514                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6515         }
6516
6517         if (tp->dev->mtu <= ETH_DATA_LEN) {
6518                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6519                      tp->bufmgr_config.mbuf_read_dma_low_water);
6520                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6521                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6522                 tw32(BUFMGR_MB_HIGH_WATER,
6523                      tp->bufmgr_config.mbuf_high_water);
6524         } else {
6525                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6526                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6527                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6528                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6529                 tw32(BUFMGR_MB_HIGH_WATER,
6530                      tp->bufmgr_config.mbuf_high_water_jumbo);
6531         }
6532         tw32(BUFMGR_DMA_LOW_WATER,
6533              tp->bufmgr_config.dma_low_water);
6534         tw32(BUFMGR_DMA_HIGH_WATER,
6535              tp->bufmgr_config.dma_high_water);
6536
6537         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6538         for (i = 0; i < 2000; i++) {
6539                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6540                         break;
6541                 udelay(10);
6542         }
6543         if (i >= 2000) {
6544                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6545                        tp->dev->name);
6546                 return -ENODEV;
6547         }
6548
6549         /* Setup replenish threshold. */
6550         val = tp->rx_pending / 8;
6551         if (val == 0)
6552                 val = 1;
6553         else if (val > tp->rx_std_max_post)
6554                 val = tp->rx_std_max_post;
6555         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6556                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6557                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6558
6559                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6560                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6561         }
6562
6563         tw32(RCVBDI_STD_THRESH, val);
6564
6565         /* Initialize TG3_BDINFO's at:
6566          *  RCVDBDI_STD_BD:     standard eth size rx ring
6567          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6568          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6569          *
6570          * like so:
6571          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6572          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6573          *                              ring attribute flags
6574          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6575          *
6576          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6577          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6578          *
6579          * The size of each ring is fixed in the firmware, but the location is
6580          * configurable.
6581          */
6582         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6583              ((u64) tp->rx_std_mapping >> 32));
6584         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6585              ((u64) tp->rx_std_mapping & 0xffffffff));
6586         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6587              NIC_SRAM_RX_BUFFER_DESC);
6588
6589         /* Don't even try to program the JUMBO/MINI buffer descriptor
6590          * configs on 5705.
6591          */
6592         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6593                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6594                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6595         } else {
6596                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6597                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6598
6599                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6600                      BDINFO_FLAGS_DISABLED);
6601
6602                 /* Setup replenish threshold. */
6603                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6604
6605                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6606                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6607                              ((u64) tp->rx_jumbo_mapping >> 32));
6608                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6609                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6610                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6611                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6612                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6613                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6614                 } else {
6615                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6616                              BDINFO_FLAGS_DISABLED);
6617                 }
6618
6619         }
6620
6621         /* There is only one send ring on 5705/5750, no need to explicitly
6622          * disable the others.
6623          */
6624         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6625                 /* Clear out send RCB ring in SRAM. */
6626                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6627                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6628                                       BDINFO_FLAGS_DISABLED);
6629         }
6630
6631         tp->tx_prod = 0;
6632         tp->tx_cons = 0;
6633         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6634         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6635
6636         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6637                        tp->tx_desc_mapping,
6638                        (TG3_TX_RING_SIZE <<
6639                         BDINFO_FLAGS_MAXLEN_SHIFT),
6640                        NIC_SRAM_TX_BUFFER_DESC);
6641
6642         /* There is only one receive return ring on 5705/5750, no need
6643          * to explicitly disable the others.
6644          */
6645         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6646                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6647                      i += TG3_BDINFO_SIZE) {
6648                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6649                                       BDINFO_FLAGS_DISABLED);
6650                 }
6651         }
6652
6653         tp->rx_rcb_ptr = 0;
6654         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6655
6656         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6657                        tp->rx_rcb_mapping,
6658                        (TG3_RX_RCB_RING_SIZE(tp) <<
6659                         BDINFO_FLAGS_MAXLEN_SHIFT),
6660                        0);
6661
6662         tp->rx_std_ptr = tp->rx_pending;
6663         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6664                      tp->rx_std_ptr);
6665
6666         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6667                                                 tp->rx_jumbo_pending : 0;
6668         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6669                      tp->rx_jumbo_ptr);
6670
6671         /* Initialize MAC address and backoff seed. */
6672         __tg3_set_mac_addr(tp, 0);
6673
6674         /* MTU + ethernet header + FCS + optional VLAN tag */
6675         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6676
6677         /* The slot time is changed by tg3_setup_phy if we
6678          * run at gigabit with half duplex.
6679          */
6680         tw32(MAC_TX_LENGTHS,
6681              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6682              (6 << TX_LENGTHS_IPG_SHIFT) |
6683              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6684
6685         /* Receive rules. */
6686         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6687         tw32(RCVLPC_CONFIG, 0x0181);
6688
6689         /* Calculate RDMAC_MODE setting early, we need it to determine
6690          * the RCVLPC_STATE_ENABLE mask.
6691          */
6692         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6693                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6694                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6695                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6696                       RDMAC_MODE_LNGREAD_ENAB);
6697
6698         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6699                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6700                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6701                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6702
6703         /* If statement applies to 5705 and 5750 PCI devices only */
6704         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6705              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6706             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6707                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6708                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6709                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6710                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6711                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6712                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6713                 }
6714         }
6715
6716         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6717                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6718
6719         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6720                 rdmac_mode |= (1 << 27);
6721
6722         /* Receive/send statistics. */
6723         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6724                 val = tr32(RCVLPC_STATS_ENABLE);
6725                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6726                 tw32(RCVLPC_STATS_ENABLE, val);
6727         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6728                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6729                 val = tr32(RCVLPC_STATS_ENABLE);
6730                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6731                 tw32(RCVLPC_STATS_ENABLE, val);
6732         } else {
6733                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6734         }
6735         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6736         tw32(SNDDATAI_STATSENAB, 0xffffff);
6737         tw32(SNDDATAI_STATSCTRL,
6738              (SNDDATAI_SCTRL_ENABLE |
6739               SNDDATAI_SCTRL_FASTUPD));
6740
6741         /* Setup host coalescing engine. */
6742         tw32(HOSTCC_MODE, 0);
6743         for (i = 0; i < 2000; i++) {
6744                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6745                         break;
6746                 udelay(10);
6747         }
6748
6749         __tg3_set_coalesce(tp, &tp->coal);
6750
6751         /* set status block DMA address */
6752         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6753              ((u64) tp->status_mapping >> 32));
6754         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6755              ((u64) tp->status_mapping & 0xffffffff));
6756
6757         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6758                 /* Status/statistics block address.  See tg3_timer,
6759                  * the tg3_periodic_fetch_stats call there, and
6760                  * tg3_get_stats to see how this works for 5705/5750 chips.
6761                  */
6762                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6763                      ((u64) tp->stats_mapping >> 32));
6764                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6765                      ((u64) tp->stats_mapping & 0xffffffff));
6766                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6767                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6768         }
6769
6770         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6771
6772         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6773         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6774         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6775                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6776
6777         /* Clear statistics/status block in chip, and status block in ram. */
6778         for (i = NIC_SRAM_STATS_BLK;
6779              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6780              i += sizeof(u32)) {
6781                 tg3_write_mem(tp, i, 0);
6782                 udelay(40);
6783         }
6784         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6785
6786         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6787                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6788                 /* reset to prevent losing 1st rx packet intermittently */
6789                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6790                 udelay(10);
6791         }
6792
6793         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6794                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6795         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6796             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6797             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6798                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6799         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6800         udelay(40);
6801
6802         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6803          * If TG3_FLG2_IS_NIC is zero, we should read the
6804          * register to preserve the GPIO settings for LOMs. The GPIOs,
6805          * whether used as inputs or outputs, are set by boot code after
6806          * reset.
6807          */
6808         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6809                 u32 gpio_mask;
6810
6811                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6812                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6813                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6814
6815                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6816                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6817                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6818
6819                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6820                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6821
6822                 tp->grc_local_ctrl &= ~gpio_mask;
6823                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6824
6825                 /* GPIO1 must be driven high for eeprom write protect */
6826                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6827                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6828                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6829         }
6830         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6831         udelay(100);
6832
6833         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6834         tp->last_tag = 0;
6835
6836         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6837                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6838                 udelay(40);
6839         }
6840
6841         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6842                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6843                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6844                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6845                WDMAC_MODE_LNGREAD_ENAB);
6846
6847         /* If statement applies to 5705 and 5750 PCI devices only */
6848         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6849              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6850             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6851                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6852                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6853                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6854                         /* nothing */
6855                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6856                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6857                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6858                         val |= WDMAC_MODE_RX_ACCEL;
6859                 }
6860         }
6861
6862         /* Enable host coalescing bug fix */
6863         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6864             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6865             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6866             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6867                 val |= (1 << 29);
6868
6869         tw32_f(WDMAC_MODE, val);
6870         udelay(40);
6871
6872         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6873                 u16 pcix_cmd;
6874
6875                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6876                                      &pcix_cmd);
6877                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6878                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6879                         pcix_cmd |= PCI_X_CMD_READ_2K;
6880                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6881                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6882                         pcix_cmd |= PCI_X_CMD_READ_2K;
6883                 }
6884                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6885                                       pcix_cmd);
6886         }
6887
6888         tw32_f(RDMAC_MODE, rdmac_mode);
6889         udelay(40);
6890
6891         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6892         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6893                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6894
6895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6896                 tw32(SNDDATAC_MODE,
6897                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6898         else
6899                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6900
6901         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6902         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6903         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6904         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6905         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6906                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6907         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6908         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6909
6910         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6911                 err = tg3_load_5701_a0_firmware_fix(tp);
6912                 if (err)
6913                         return err;
6914         }
6915
6916         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6917                 err = tg3_load_tso_firmware(tp);
6918                 if (err)
6919                         return err;
6920         }
6921
6922         tp->tx_mode = TX_MODE_ENABLE;
6923         tw32_f(MAC_TX_MODE, tp->tx_mode);
6924         udelay(100);
6925
6926         tp->rx_mode = RX_MODE_ENABLE;
6927         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6928             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6929                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6930
6931         tw32_f(MAC_RX_MODE, tp->rx_mode);
6932         udelay(10);
6933
6934         if (tp->link_config.phy_is_low_power) {
6935                 tp->link_config.phy_is_low_power = 0;
6936                 tp->link_config.speed = tp->link_config.orig_speed;
6937                 tp->link_config.duplex = tp->link_config.orig_duplex;
6938                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6939         }
6940
6941         tp->mi_mode = MAC_MI_MODE_BASE;
6942         tw32_f(MAC_MI_MODE, tp->mi_mode);
6943         udelay(80);
6944
6945         tw32(MAC_LED_CTRL, tp->led_ctrl);
6946
6947         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6948         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6949                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6950                 udelay(10);
6951         }
6952         tw32_f(MAC_RX_MODE, tp->rx_mode);
6953         udelay(10);
6954
6955         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6956                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6957                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6958                         /* Set drive transmission level to 1.2V  */
6959                         /* only if the signal pre-emphasis bit is not set  */
6960                         val = tr32(MAC_SERDES_CFG);
6961                         val &= 0xfffff000;
6962                         val |= 0x880;
6963                         tw32(MAC_SERDES_CFG, val);
6964                 }
6965                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6966                         tw32(MAC_SERDES_CFG, 0x616000);
6967         }
6968
6969         /* Prevent chip from dropping frames when flow control
6970          * is enabled.
6971          */
6972         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6973
6974         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6975             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6976                 /* Use hardware link auto-negotiation */
6977                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6978         }
6979
6980         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6981             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6982                 u32 tmp;
6983
6984                 tmp = tr32(SERDES_RX_CTRL);
6985                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6986                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6987                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6988                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6989         }
6990
6991         err = tg3_setup_phy(tp, 0);
6992         if (err)
6993                 return err;
6994
6995         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6996             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6997                 u32 tmp;
6998
6999                 /* Clear CRC stats. */
7000                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7001                         tg3_writephy(tp, MII_TG3_TEST1,
7002                                      tmp | MII_TG3_TEST1_CRC_EN);
7003                         tg3_readphy(tp, 0x14, &tmp);
7004                 }
7005         }
7006
7007         __tg3_set_rx_mode(tp->dev);
7008
7009         /* Initialize receive rules. */
7010         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7011         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7012         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7013         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7014
7015         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7016             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7017                 limit = 8;
7018         else
7019                 limit = 16;
7020         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7021                 limit -= 4;
7022         switch (limit) {
7023         case 16:
7024                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7025         case 15:
7026                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7027         case 14:
7028                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7029         case 13:
7030                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7031         case 12:
7032                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7033         case 11:
7034                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7035         case 10:
7036                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7037         case 9:
7038                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7039         case 8:
7040                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7041         case 7:
7042                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7043         case 6:
7044                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7045         case 5:
7046                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7047         case 4:
7048                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7049         case 3:
7050                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7051         case 2:
7052         case 1:
7053
7054         default:
7055                 break;
7056         };
7057
7058         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7059                 /* Write our heartbeat update interval to APE. */
7060                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7061                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7062
7063         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7064
7065         return 0;
7066 }
7067
7068 /* Called at device open time to get the chip ready for
7069  * packet processing.  Invoked with tp->lock held.
7070  */
7071 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7072 {
7073         int err;
7074
7075         /* Force the chip into D0. */
7076         err = tg3_set_power_state(tp, PCI_D0);
7077         if (err)
7078                 goto out;
7079
7080         tg3_switch_clocks(tp);
7081
7082         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7083
7084         err = tg3_reset_hw(tp, reset_phy);
7085
7086 out:
7087         return err;
7088 }
7089
/* Accumulate the 32-bit hardware counter at register REG into the
 * 64-bit software statistic PSTAT, kept as separate high/low words.
 * Hardware counters are read-and-accumulated periodically; if the
 * low-word addition wraps (new low < value just added), carry one
 * into the high word.  Multi-statement macro wrapped in
 * do { } while (0) so it expands safely in unbraced if/else bodies.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7096
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement counters
 * into the driver's 64-bit statistics block via TG3_STAT_ADD32.
 * Called once per second from tg3_timer() on 5705-plus chips.  Skipped
 * while the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7137
/* Periodic driver timer (runs at 1 Hz with tagged IRQ status, 10 Hz
 * otherwise — see tg3_open()).  Under tp->lock it: works around the
 * racy non-tagged status-block protocol, detects a hung write DMA
 * engine and schedules a full reset, polls link/PHY state once per
 * second, and sends the ASF firmware heartbeat every two seconds.
 * Always re-arms itself at tp->timer_offset.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* While interrupts are being synchronized just re-arm; don't
	 * touch the hardware or take tp->lock.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine dropped its enable bit: chip is hung.
		 * Hand off to the reset task; it re-arms the timer.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for a link change (or MI
			 * interrupt bit) instead of relying on a link
			 * change interrupt.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Re-run PHY setup if the link dropped, or if we
			 * are down but PCS sync / signal detect appeared.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Briefly clear the port mode bits
					 * before restoring the MAC mode.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Ring the firmware's doorbell. */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7257
7258 static int tg3_request_irq(struct tg3 *tp)
7259 {
7260         irq_handler_t fn;
7261         unsigned long flags;
7262         struct net_device *dev = tp->dev;
7263
7264         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7265                 fn = tg3_msi;
7266                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7267                         fn = tg3_msi_1shot;
7268                 flags = IRQF_SAMPLE_RANDOM;
7269         } else {
7270                 fn = tg3_interrupt;
7271                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7272                         fn = tg3_interrupt_tagged;
7273                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7274         }
7275         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7276 }
7277
/* Verify that the chip can actually deliver an interrupt: temporarily
 * install tg3_test_isr, force a coalescing-now event, and poll up to
 * ~50ms for evidence that the interrupt fired (non-zero interrupt
 * mailbox, or the ISR having set MISC_HOST_CTRL_MASK_PCI_INT).
 * Restores the normal handler before returning.  Returns 0 on
 * success, -EIO if no interrupt was seen, or another negative errno.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap in the dedicated test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate host-coalescing event to trigger an IRQ. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Put the regular handler back regardless of the outcome. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7331
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other negative errno means the device is
 * left in an unusable interrupt state.  No-op unless MSI is in use.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7392
/* net_device open hook.  Powers the chip up to D0, allocates the
 * consistent (DMA) descriptor memory, enables MSI when supported (and
 * verified via tg3_test_msi()), initializes the hardware, sets up and
 * arms the periodic timer, then enables interrupts and the TX queue.
 * Every failure path unwinds whatever was set up before it.  Returns 0
 * or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Chip must be in full power before any register access. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Undo MSI enable and DMA memory allocation. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Timer period: 1s with tagged status, 100ms otherwise.
		 * timer_counter counts timer ticks per second; the ASF
		 * heartbeat counter spans two seconds.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Verify MSI delivery; tg3_test_msi() itself falls back
		 * to INTx, so a non-zero return means even the fallback
		 * failed and we must tear everything down.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7528
7529 #if 0
/* Debug-only (compiled out via the surrounding #if 0): dump the PCI
 * status, every major control-block mode/status register, the BDINFO
 * structures, the SRAM ring control blocks, the host status/statistics
 * blocks, mailboxes, and the first few NIC-side TX/RX descriptors to
 * the kernel log.  Intended to be called with the device quiesced
 * (see the commented-out call site in tg3_close()).
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
7755 #endif
7756
7757 static struct net_device_stats *tg3_get_stats(struct net_device *);
7758 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7759
/* net_device stop hook.  Quiesces NAPI, any pending reset work and the
 * periodic timer, halts the chip and frees the rings under the full
 * lock, releases the IRQ (tearing down MSI if active), snapshots the
 * final statistics so they survive the ring free, releases DMA memory,
 * and drops the chip to D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure no reset_task is running once we start tearing down. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve the final counters; hw_stats goes away below. */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7803
7804 static inline unsigned long get_stat64(tg3_stat64_t *val)
7805 {
7806         unsigned long ret;
7807
7808 #if (BITS_PER_LONG == 32)
7809         ret = val->low;
7810 #else
7811         ret = ((u64)val->high << 32) | ((u64)val->low);
7812 #endif
7813         return ret;
7814 }
7815
/* Return the cumulative receive CRC error count.  On 5700/5701 copper
 * chips the MAC's rx_fcs_errors counter is bypassed in favor of the
 * PHY's own CRC error counter: enable it via MII_TG3_TEST1_CRC_EN,
 * read the (self-clearing on read -- TODO confirm) count from PHY
 * register 0x14 under tp->lock, and accumulate into
 * tp->phy_crc_errors.  All other chips use the hardware statistics
 * block.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7841
/* Produce a monotonic ethtool counter for 'member': the value saved at
 * the last teardown (old_estats) plus the chip's current count.
 * Expansion site must define 'estats', 'old_estats' and 'hw_stats'
 * locals (see tg3_get_estats()).
 */
#define ESTAT_ADD(member) \
	estats->member =        old_estats->member + \
				get_stat64(&hw_stats->member)
7845
/* Backend for the ethtool statistics dump.  Folds the live hardware
 * counters on top of the totals saved in tp->estats_prev so the values
 * reported to userspace stay monotonic across chip resets.  Returns a
 * pointer to tp->estats (or to the saved totals if the hardware stats
 * block is not mapped, e.g. the device is not up). */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Write-DMA / receive list placement counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Read-DMA / send data counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing / interrupt counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7933
/* netdev ->get_stats hook.  Maps the hardware statistics block onto the
 * generic net_device_stats fields, folding the live counters on top of
 * the totals saved in tp->net_stats_prev so the values stay monotonic
 * across chip resets.  If the stats block is not mapped (device not
 * up), the previously saved totals are returned unchanged. */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet counts are kept per cast type in hardware; sum them. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips; see
	 * calc_crc_errors(). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7993
7994 static inline u32 calc_crc(unsigned char *buf, int len)
7995 {
7996         u32 reg;
7997         u32 tmp;
7998         int j, k;
7999
8000         reg = 0xffffffff;
8001
8002         for (j = 0; j < len; j++) {
8003                 reg ^= buf[j];
8004
8005                 for (k = 0; k < 8; k++) {
8006                         tmp = reg & 0x01;
8007
8008                         reg >>= 1;
8009
8010                         if (tmp) {
8011                                 reg ^= 0xedb88320;
8012                         }
8013                 }
8014         }
8015
8016         return ~reg;
8017 }
8018
8019 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8020 {
8021         /* accept or reject all multicast frames */
8022         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8023         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8024         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8025         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8026 }
8027
/* Program MAC_RX_MODE and the multicast hash filter from dev->flags and
 * the device's multicast address list.  Caller must hold the full
 * driver lock (see tg3_set_rx_mode()). */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Derive 7 bits from the address CRC to select
			 * one of 128 hash-filter bits: bits 5-6 pick the
			 * register, bits 0-4 pick the bit within it. */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the MAC register if something actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8091
/* netdev ->set_rx_mode hook: recompute RX filtering under the full
 * driver lock.  A device that is not running is left untouched. */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8103
/* Size in bytes of the raw register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN         (32 * 1024)

/* ethtool ->get_regs_len: fixed-size dump for all chip variants. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8110
/* ethtool ->get_regs: dump chip register space into the caller's
 * TG3_REGDUMP_LEN buffer.  The buffer is zeroed first and each
 * GET_REG32_* macro repositions p to the register's own offset inside
 * the buffer, so registers appear at their native offsets and skipped
 * ranges read back as zero.  If the PHY is powered down the function
 * returns early, leaving an all-zero dump. */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register and advance the output cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Seek to (base) in the output buffer, then read (len) bytes' worth of
 * consecutive 32-bit registers starting at (base). */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Seek to (reg) in the output buffer and read that single register. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8183
8184 static int tg3_get_eeprom_len(struct net_device *dev)
8185 {
8186         struct tg3 *tp = netdev_priv(dev);
8187
8188         return tp->nvram_size;
8189 }
8190
8191 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8192 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8193
/* ethtool ->get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is read one 32-bit word at a time,
 * so an unaligned head and tail are handled separately from the bulk
 * of the transfer.  Words pass through cpu_to_le32() before being
 * copied out — presumably so the bytes reach userspace in NVRAM byte
 * order regardless of host endianness (depends on tg3_nvram_read()'s
 * convention, which is not visible here).
 *
 * eeprom->len is rebuilt incrementally so that on a read error it
 * reflects how many bytes were actually transferred.
 * Returns 0 on success, -EAGAIN if the PHY is powered down, or the
 * error from tg3_nvram_read(). */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8255
8256 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8257
/* ethtool ->set_eeprom: write eeprom->len bytes of @data to NVRAM at
 * eeprom->offset.  NVRAM writes must be 4-byte aligned, so a misaligned
 * head or tail is handled by reading the neighbouring word and merging
 * it with the user data in a temporary bounce buffer before issuing one
 * aligned block write.
 *
 * Returns 0 on success, -EAGAIN if the PHY is powered down, -EINVAL on
 * a bad magic, -ENOMEM if the bounce buffer cannot be allocated, or the
 * error from the NVRAM read/write helpers. */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		/* 'start' keeps the word preceding the user data so its
		 * leading bytes can be preserved. */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		/* 'end' keeps the word covering the tail so its trailing
		 * bytes can be preserved. */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8316
8317 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8318 {
8319         struct tg3 *tp = netdev_priv(dev);
8320
8321         cmd->supported = (SUPPORTED_Autoneg);
8322
8323         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8324                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8325                                    SUPPORTED_1000baseT_Full);
8326
8327         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8328                 cmd->supported |= (SUPPORTED_100baseT_Half |
8329                                   SUPPORTED_100baseT_Full |
8330                                   SUPPORTED_10baseT_Half |
8331                                   SUPPORTED_10baseT_Full |
8332                                   SUPPORTED_MII);
8333                 cmd->port = PORT_TP;
8334         } else {
8335                 cmd->supported |= SUPPORTED_FIBRE;
8336                 cmd->port = PORT_FIBRE;
8337         }
8338
8339         cmd->advertising = tp->link_config.advertising;
8340         if (netif_running(dev)) {
8341                 cmd->speed = tp->link_config.active_speed;
8342                 cmd->duplex = tp->link_config.active_duplex;
8343         }
8344         cmd->phy_address = PHY_ADDR;
8345         cmd->transceiver = 0;
8346         cmd->autoneg = tp->link_config.autoneg;
8347         cmd->maxtxpkt = 0;
8348         cmd->maxrxpkt = 0;
8349         return 0;
8350 }
8351
8352 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8353 {
8354         struct tg3 *tp = netdev_priv(dev);
8355
8356         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8357                 /* These are the only valid advertisement bits allowed.  */
8358                 if (cmd->autoneg == AUTONEG_ENABLE &&
8359                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8360                                           ADVERTISED_1000baseT_Full |
8361                                           ADVERTISED_Autoneg |
8362                                           ADVERTISED_FIBRE)))
8363                         return -EINVAL;
8364                 /* Fiber can only do SPEED_1000.  */
8365                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8366                          (cmd->speed != SPEED_1000))
8367                         return -EINVAL;
8368         /* Copper cannot force SPEED_1000.  */
8369         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8370                    (cmd->speed == SPEED_1000))
8371                 return -EINVAL;
8372         else if ((cmd->speed == SPEED_1000) &&
8373                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8374                 return -EINVAL;
8375
8376         tg3_full_lock(tp, 0);
8377
8378         tp->link_config.autoneg = cmd->autoneg;
8379         if (cmd->autoneg == AUTONEG_ENABLE) {
8380                 tp->link_config.advertising = (cmd->advertising |
8381                                               ADVERTISED_Autoneg);
8382                 tp->link_config.speed = SPEED_INVALID;
8383                 tp->link_config.duplex = DUPLEX_INVALID;
8384         } else {
8385                 tp->link_config.advertising = 0;
8386                 tp->link_config.speed = cmd->speed;
8387                 tp->link_config.duplex = cmd->duplex;
8388         }
8389
8390         tp->link_config.orig_speed = tp->link_config.speed;
8391         tp->link_config.orig_duplex = tp->link_config.duplex;
8392         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8393
8394         if (netif_running(dev))
8395                 tg3_setup_phy(tp, 1);
8396
8397         tg3_full_unlock(tp);
8398
8399         return 0;
8400 }
8401
8402 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8403 {
8404         struct tg3 *tp = netdev_priv(dev);
8405
8406         strcpy(info->driver, DRV_MODULE_NAME);
8407         strcpy(info->version, DRV_MODULE_VERSION);
8408         strcpy(info->fw_version, tp->fw_ver);
8409         strcpy(info->bus_info, pci_name(tp->pdev));
8410 }
8411
8412 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8413 {
8414         struct tg3 *tp = netdev_priv(dev);
8415
8416         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8417                 wol->supported = WAKE_MAGIC;
8418         else
8419                 wol->supported = 0;
8420         wol->wolopts = 0;
8421         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8422                 wol->wolopts = WAKE_MAGIC;
8423         memset(&wol->sopass, 0, sizeof(wol->sopass));
8424 }
8425
8426 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8427 {
8428         struct tg3 *tp = netdev_priv(dev);
8429
8430         if (wol->wolopts & ~WAKE_MAGIC)
8431                 return -EINVAL;
8432         if ((wol->wolopts & WAKE_MAGIC) &&
8433             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8434                 return -EINVAL;
8435
8436         spin_lock_bh(&tp->lock);
8437         if (wol->wolopts & WAKE_MAGIC)
8438                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8439         else
8440                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8441         spin_unlock_bh(&tp->lock);
8442
8443         return 0;
8444 }
8445
8446 static u32 tg3_get_msglevel(struct net_device *dev)
8447 {
8448         struct tg3 *tp = netdev_priv(dev);
8449         return tp->msg_enable;
8450 }
8451
8452 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8453 {
8454         struct tg3 *tp = netdev_priv(dev);
8455         tp->msg_enable = value;
8456 }
8457
/* ethtool ->set_tso.  On chips without TSO capability only "off" is
 * accepted.  Chips with the HW_TSO_2 engine (except the 5906) also
 * toggle TSO over IPv6, and the 5761 additionally supports TSO with
 * ECN; those feature bits are kept in step with the base TSO setting
 * before delegating to the generic ethtool helper. */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
8478
/* ethtool ->nway_reset: restart link autonegotiation.
 *
 * Only meaningful on a running, non-serdes (copper PHY) device, and
 * only when autonegotiation is currently enabled or parallel detection
 * is active; otherwise -EINVAL (-EAGAIN if the interface is down). */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and the first result is
	 * discarded — this looks like an intentional dummy read to
	 * settle the register; confirm before removing. */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8505
8506 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8507 {
8508         struct tg3 *tp = netdev_priv(dev);
8509
8510         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8511         ering->rx_mini_max_pending = 0;
8512         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8513                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8514         else
8515                 ering->rx_jumbo_max_pending = 0;
8516
8517         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8518
8519         ering->rx_pending = tp->rx_pending;
8520         ering->rx_mini_pending = 0;
8521         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8522                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8523         else
8524                 ering->rx_jumbo_pending = 0;
8525
8526         ering->tx_pending = tp->tx_pending;
8527 }
8528
/* ethtool ->set_ringparam: validate the requested ring sizes, then (if
 * the interface is up) halt the chip, apply them, and restart. */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* The TX ring must hold more than one maximally fragmented skb
	 * (three times that margin on TG3_FLG2_TSO_BUG chips). */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post 64 standard RX descriptors; clamp. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8568
8569 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8570 {
8571         struct tg3 *tp = netdev_priv(dev);
8572
8573         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8574         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8575         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8576 }
8577
8578 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8579 {
8580         struct tg3 *tp = netdev_priv(dev);
8581         int irq_sync = 0, err = 0;
8582
8583         if (netif_running(dev)) {
8584                 tg3_netif_stop(tp);
8585                 irq_sync = 1;
8586         }
8587
8588         tg3_full_lock(tp, irq_sync);
8589
8590         if (epause->autoneg)
8591                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8592         else
8593                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8594         if (epause->rx_pause)
8595                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8596         else
8597                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8598         if (epause->tx_pause)
8599                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8600         else
8601                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8602
8603         if (netif_running(dev)) {
8604                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8605                 err = tg3_restart_hw(tp, 1);
8606                 if (!err)
8607                         tg3_netif_start(tp);
8608         }
8609
8610         tg3_full_unlock(tp);
8611
8612         return err;
8613 }
8614
8615 static u32 tg3_get_rx_csum(struct net_device *dev)
8616 {
8617         struct tg3 *tp = netdev_priv(dev);
8618         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8619 }
8620
8621 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8622 {
8623         struct tg3 *tp = netdev_priv(dev);
8624
8625         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8626                 if (data != 0)
8627                         return -EINVAL;
8628                 return 0;
8629         }
8630
8631         spin_lock_bh(&tp->lock);
8632         if (data)
8633                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8634         else
8635                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8636         spin_unlock_bh(&tp->lock);
8637
8638         return 0;
8639 }
8640
8641 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8642 {
8643         struct tg3 *tp = netdev_priv(dev);
8644
8645         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8646                 if (data != 0)
8647                         return -EINVAL;
8648                 return 0;
8649         }
8650
8651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8652             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8653             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8654             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8655                 ethtool_op_set_tx_ipv6_csum(dev, data);
8656         else
8657                 ethtool_op_set_tx_csum(dev, data);
8658
8659         return 0;
8660 }
8661
8662 static int tg3_get_sset_count (struct net_device *dev, int sset)
8663 {
8664         switch (sset) {
8665         case ETH_SS_TEST:
8666                 return TG3_NUM_TEST;
8667         case ETH_SS_STATS:
8668                 return TG3_NUM_STATS;
8669         default:
8670                 return -EOPNOTSUPP;
8671         }
8672 }
8673
8674 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8675 {
8676         switch (stringset) {
8677         case ETH_SS_STATS:
8678                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8679                 break;
8680         case ETH_SS_TEST:
8681                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8682                 break;
8683         default:
8684                 WARN_ON(1);     /* we need a WARN() */
8685                 break;
8686         }
8687 }
8688
/* ethtool ->phys_id: identify the NIC by blinking its LEDs.
 *
 * @data: blink duration in seconds (0 means 2 seconds).  The LEDs
 * toggle between all-on and all-off every 500 ms — 2*data half-cycles —
 * then the original LED configuration is restored.  An interrupted
 * sleep ends the blinking early. */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			/* Force every LED on, overriding link/traffic. */
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			/* Force every LED off. */
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8720
8721 static void tg3_get_ethtool_stats (struct net_device *dev,
8722                                    struct ethtool_stats *estats, u64 *tmp_stats)
8723 {
8724         struct tg3 *tp = netdev_priv(dev);
8725         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8726 }
8727
8728 #define NVRAM_TEST_SIZE 0x100
8729 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
8730 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
8731 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
8732 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8733 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8734
/* Validate the NVRAM contents against the checksum scheme appropriate
 * for the detected image format (standard EEPROM, selfboot firmware
 * image, or selfboot HW image).  Returns 0 when the image checks out,
 * -EIO on a read failure or checksum mismatch, -ENOMEM if the staging
 * buffer cannot be allocated, and 0 (nothing verifiable) for unknown
 * selfboot revisions/formats.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* The magic word at offset 0 identifies the layout and thus how
	 * many bytes must be read and which checksum applies.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Selfboot format 1: image size depends on the
			 * revision encoded in the magic word.
			 */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: nothing we can check. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into the staging buffer one 32-bit word at a
	 * time, stored in little-endian byte order.
	 */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((cpu_to_be32(buf[0]) & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid selfboot image sums (mod 256) to zero. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each hold 7 parity bits
				 * (mask 0x80 down to 0x02) for the data
				 * bytes that follow them.
				 */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 holds 6 parity bits, byte 17
				 * another 8.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte combined with its stored parity bit
		 * must have even overall parity; either mismatch case
		 * below fails the test.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8869
8870 #define TG3_SERDES_TIMEOUT_SEC  2
8871 #define TG3_COPPER_TIMEOUT_SEC  6
8872
8873 static int tg3_test_link(struct tg3 *tp)
8874 {
8875         int i, max;
8876
8877         if (!netif_running(tp->dev))
8878                 return -ENODEV;
8879
8880         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8881                 max = TG3_SERDES_TIMEOUT_SEC;
8882         else
8883                 max = TG3_COPPER_TIMEOUT_SEC;
8884
8885         for (i = 0; i < max; i++) {
8886                 if (netif_carrier_ok(tp->dev))
8887                         return 0;
8888
8889                 if (msleep_interruptible(1000))
8890                         break;
8891         }
8892
8893         return -EIO;
8894 }
8895
8896 /* Only test the commonly used registers */
8897 static int tg3_test_registers(struct tg3 *tp)
8898 {
8899         int i, is_5705, is_5750;
8900         u32 offset, read_mask, write_mask, val, save_val, read_val;
8901         static struct {
8902                 u16 offset;
8903                 u16 flags;
8904 #define TG3_FL_5705     0x1
8905 #define TG3_FL_NOT_5705 0x2
8906 #define TG3_FL_NOT_5788 0x4
8907 #define TG3_FL_NOT_5750 0x8
8908                 u32 read_mask;
8909                 u32 write_mask;
8910         } reg_tbl[] = {
8911                 /* MAC Control Registers */
8912                 { MAC_MODE, TG3_FL_NOT_5705,
8913                         0x00000000, 0x00ef6f8c },
8914                 { MAC_MODE, TG3_FL_5705,
8915                         0x00000000, 0x01ef6b8c },
8916                 { MAC_STATUS, TG3_FL_NOT_5705,
8917                         0x03800107, 0x00000000 },
8918                 { MAC_STATUS, TG3_FL_5705,
8919                         0x03800100, 0x00000000 },
8920                 { MAC_ADDR_0_HIGH, 0x0000,
8921                         0x00000000, 0x0000ffff },
8922                 { MAC_ADDR_0_LOW, 0x0000,
8923                         0x00000000, 0xffffffff },
8924                 { MAC_RX_MTU_SIZE, 0x0000,
8925                         0x00000000, 0x0000ffff },
8926                 { MAC_TX_MODE, 0x0000,
8927                         0x00000000, 0x00000070 },
8928                 { MAC_TX_LENGTHS, 0x0000,
8929                         0x00000000, 0x00003fff },
8930                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8931                         0x00000000, 0x000007fc },
8932                 { MAC_RX_MODE, TG3_FL_5705,
8933                         0x00000000, 0x000007dc },
8934                 { MAC_HASH_REG_0, 0x0000,
8935                         0x00000000, 0xffffffff },
8936                 { MAC_HASH_REG_1, 0x0000,
8937                         0x00000000, 0xffffffff },
8938                 { MAC_HASH_REG_2, 0x0000,
8939                         0x00000000, 0xffffffff },
8940                 { MAC_HASH_REG_3, 0x0000,
8941                         0x00000000, 0xffffffff },
8942
8943                 /* Receive Data and Receive BD Initiator Control Registers. */
8944                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8945                         0x00000000, 0xffffffff },
8946                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8947                         0x00000000, 0xffffffff },
8948                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8949                         0x00000000, 0x00000003 },
8950                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8951                         0x00000000, 0xffffffff },
8952                 { RCVDBDI_STD_BD+0, 0x0000,
8953                         0x00000000, 0xffffffff },
8954                 { RCVDBDI_STD_BD+4, 0x0000,
8955                         0x00000000, 0xffffffff },
8956                 { RCVDBDI_STD_BD+8, 0x0000,
8957                         0x00000000, 0xffff0002 },
8958                 { RCVDBDI_STD_BD+0xc, 0x0000,
8959                         0x00000000, 0xffffffff },
8960
8961                 /* Receive BD Initiator Control Registers. */
8962                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8963                         0x00000000, 0xffffffff },
8964                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8965                         0x00000000, 0x000003ff },
8966                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8967                         0x00000000, 0xffffffff },
8968
8969                 /* Host Coalescing Control Registers. */
8970                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8971                         0x00000000, 0x00000004 },
8972                 { HOSTCC_MODE, TG3_FL_5705,
8973                         0x00000000, 0x000000f6 },
8974                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8975                         0x00000000, 0xffffffff },
8976                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8977                         0x00000000, 0x000003ff },
8978                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8979                         0x00000000, 0xffffffff },
8980                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8981                         0x00000000, 0x000003ff },
8982                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8983                         0x00000000, 0xffffffff },
8984                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8985                         0x00000000, 0x000000ff },
8986                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8987                         0x00000000, 0xffffffff },
8988                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8989                         0x00000000, 0x000000ff },
8990                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8991                         0x00000000, 0xffffffff },
8992                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8993                         0x00000000, 0xffffffff },
8994                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8995                         0x00000000, 0xffffffff },
8996                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8997                         0x00000000, 0x000000ff },
8998                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8999                         0x00000000, 0xffffffff },
9000                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9001                         0x00000000, 0x000000ff },
9002                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9003                         0x00000000, 0xffffffff },
9004                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9005                         0x00000000, 0xffffffff },
9006                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9007                         0x00000000, 0xffffffff },
9008                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9009                         0x00000000, 0xffffffff },
9010                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9011                         0x00000000, 0xffffffff },
9012                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9013                         0xffffffff, 0x00000000 },
9014                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9015                         0xffffffff, 0x00000000 },
9016
9017                 /* Buffer Manager Control Registers. */
9018                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9019                         0x00000000, 0x007fff80 },
9020                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9021                         0x00000000, 0x007fffff },
9022                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9023                         0x00000000, 0x0000003f },
9024                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9025                         0x00000000, 0x000001ff },
9026                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9027                         0x00000000, 0x000001ff },
9028                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9029                         0xffffffff, 0x00000000 },
9030                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9031                         0xffffffff, 0x00000000 },
9032
9033                 /* Mailbox Registers */
9034                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9035                         0x00000000, 0x000001ff },
9036                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9037                         0x00000000, 0x000001ff },
9038                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9039                         0x00000000, 0x000007ff },
9040                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9041                         0x00000000, 0x000001ff },
9042
9043                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9044         };
9045
9046         is_5705 = is_5750 = 0;
9047         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9048                 is_5705 = 1;
9049                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9050                         is_5750 = 1;
9051         }
9052
9053         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9054                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9055                         continue;
9056
9057                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9058                         continue;
9059
9060                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9061                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9062                         continue;
9063
9064                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9065                         continue;
9066
9067                 offset = (u32) reg_tbl[i].offset;
9068                 read_mask = reg_tbl[i].read_mask;
9069                 write_mask = reg_tbl[i].write_mask;
9070
9071                 /* Save the original register content */
9072                 save_val = tr32(offset);
9073
9074                 /* Determine the read-only value. */
9075                 read_val = save_val & read_mask;
9076
9077                 /* Write zero to the register, then make sure the read-only bits
9078                  * are not changed and the read/write bits are all zeros.
9079                  */
9080                 tw32(offset, 0);
9081
9082                 val = tr32(offset);
9083
9084                 /* Test the read-only and read/write bits. */
9085                 if (((val & read_mask) != read_val) || (val & write_mask))
9086                         goto out;
9087
9088                 /* Write ones to all the bits defined by RdMask and WrMask, then
9089                  * make sure the read-only bits are not changed and the
9090                  * read/write bits are all ones.
9091                  */
9092                 tw32(offset, read_mask | write_mask);
9093
9094                 val = tr32(offset);
9095
9096                 /* Test the read-only bits. */
9097                 if ((val & read_mask) != read_val)
9098                         goto out;
9099
9100                 /* Test the read/write bits. */
9101                 if ((val & write_mask) != write_mask)
9102                         goto out;
9103
9104                 tw32(offset, save_val);
9105         }
9106
9107         return 0;
9108
9109 out:
9110         if (netif_msg_hw(tp))
9111                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9112                        offset);
9113         tw32(offset, save_val);
9114         return -EIO;
9115 }
9116
9117 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9118 {
9119         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9120         int i;
9121         u32 j;
9122
9123         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9124                 for (j = 0; j < len; j += 4) {
9125                         u32 val;
9126
9127                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9128                         tg3_read_mem(tp, offset + j, &val);
9129                         if (val != test_pattern[i])
9130                                 return -EIO;
9131                 }
9132         }
9133         return 0;
9134 }
9135
9136 static int tg3_test_memory(struct tg3 *tp)
9137 {
9138         static struct mem_entry {
9139                 u32 offset;
9140                 u32 len;
9141         } mem_tbl_570x[] = {
9142                 { 0x00000000, 0x00b50},
9143                 { 0x00002000, 0x1c000},
9144                 { 0xffffffff, 0x00000}
9145         }, mem_tbl_5705[] = {
9146                 { 0x00000100, 0x0000c},
9147                 { 0x00000200, 0x00008},
9148                 { 0x00004000, 0x00800},
9149                 { 0x00006000, 0x01000},
9150                 { 0x00008000, 0x02000},
9151                 { 0x00010000, 0x0e000},
9152                 { 0xffffffff, 0x00000}
9153         }, mem_tbl_5755[] = {
9154                 { 0x00000200, 0x00008},
9155                 { 0x00004000, 0x00800},
9156                 { 0x00006000, 0x00800},
9157                 { 0x00008000, 0x02000},
9158                 { 0x00010000, 0x0c000},
9159                 { 0xffffffff, 0x00000}
9160         }, mem_tbl_5906[] = {
9161                 { 0x00000200, 0x00008},
9162                 { 0x00004000, 0x00400},
9163                 { 0x00006000, 0x00400},
9164                 { 0x00008000, 0x01000},
9165                 { 0x00010000, 0x01000},
9166                 { 0xffffffff, 0x00000}
9167         };
9168         struct mem_entry *mem_tbl;
9169         int err = 0;
9170         int i;
9171
9172         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9173                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9174                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9175                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9176                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9177                         mem_tbl = mem_tbl_5755;
9178                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9179                         mem_tbl = mem_tbl_5906;
9180                 else
9181                         mem_tbl = mem_tbl_5705;
9182         } else
9183                 mem_tbl = mem_tbl_570x;
9184
9185         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9186                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9187                     mem_tbl[i].len)) != 0)
9188                         break;
9189         }
9190
9191         return err;
9192 }
9193
9194 #define TG3_MAC_LOOPBACK        0
9195 #define TG3_PHY_LOOPBACK        1
9196
/* Send one frame through the chip in MAC- or PHY-level loopback and
 * verify it comes back intact.  Returns 0 on success (or on 5780 MAC
 * loopback, which is skipped due to a HW erratum), -EIO on timeout or
 * data mismatch, -ENOMEM if no skb could be allocated, -EINVAL for an
 * unknown loopback_mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Switch the MAC into internal loopback mode. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: clear bit 5 of shadow register 0x1b
			 * before enabling loopback (shadow access is
			 * gated by MII_TG3_EPHY_SHADOW_EN).
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		/* Put the PHY itself into loopback. */
		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700: link polarity depends on the attached PHY. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a max-size test frame: our MAC address, 8 zero bytes,
	 * then an incrementing byte pattern that is verified on receive.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember the rx producer index so we can tell when our
	 * packet arrives.
	 */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Publish the new producer index to start transmission. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the receive descriptor: right ring, no errors (other
	 * than the tolerated odd-nibble MII error), right length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the payload pattern byte for byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9364
9365 #define TG3_MAC_LOOPBACK_FAILED         1
9366 #define TG3_PHY_LOOPBACK_FAILED         2
9367 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9368                                          TG3_PHY_LOOPBACK_FAILED)
9369
/* Run the MAC loopback test and, for non-serdes devices, the PHY
 * loopback test.  Returns 0 on success or a bitmask of
 * TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED; returns
 * TG3_LOOPBACK_FAILED outright if the interface is down or the chip
 * cannot be reset/prepared.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
		int i;
		u32 status;

		/* Request the CPMU hardware mutex before touching
		 * TG3_CPMU_CTRL.
		 */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off power management based on link speed. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
		/* Restore the saved CPMU control settings. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback only makes sense when there is a PHY. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9425
/* ethtool self-test entry point.  Always runs the NVRAM (data[0]) and
 * link (data[1]) tests; when ETH_TEST_FL_OFFLINE is requested it also
 * halts the chip and runs the register (data[2]), memory (data[3]),
 * loopback (data[4]) and interrupt (data[5]) tests, then restarts the
 * hardware.  Non-zero data[] entries and ETH_TEST_FL_FAILED indicate
 * failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Bring the chip to full power for the duration of the tests. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its on-board CPUs before poking
		 * registers and memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* NOTE(review): the full lock is dropped around the
		 * interrupt test — presumably it must run with the
		 * device interrupt serviceable; confirm against
		 * tg3_test_interrupt().
		 */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset the chip and restart it to resume normal
		 * operation after the destructive offline tests.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Drop back to low power if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9498
9499 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9500 {
9501         struct mii_ioctl_data *data = if_mii(ifr);
9502         struct tg3 *tp = netdev_priv(dev);
9503         int err;
9504
9505         switch(cmd) {
9506         case SIOCGMIIPHY:
9507                 data->phy_id = PHY_ADDR;
9508
9509                 /* fallthru */
9510         case SIOCGMIIREG: {
9511                 u32 mii_regval;
9512
9513                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9514                         break;                  /* We have no PHY */
9515
9516                 if (tp->link_config.phy_is_low_power)
9517                         return -EAGAIN;
9518
9519                 spin_lock_bh(&tp->lock);
9520                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9521                 spin_unlock_bh(&tp->lock);
9522
9523                 data->val_out = mii_regval;
9524
9525                 return err;
9526         }
9527
9528         case SIOCSMIIREG:
9529                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9530                         break;                  /* We have no PHY */
9531
9532                 if (!capable(CAP_NET_ADMIN))
9533                         return -EPERM;
9534
9535                 if (tp->link_config.phy_is_low_power)
9536                         return -EAGAIN;
9537
9538                 spin_lock_bh(&tp->lock);
9539                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9540                 spin_unlock_bh(&tp->lock);
9541
9542                 return err;
9543
9544         default:
9545                 /* do nothing */
9546                 break;
9547         }
9548         return -EOPNOTSUPP;
9549 }
9550
#if TG3_VLAN_TAG_USED
/* Install a new VLAN group and refresh the RX_MODE_KEEP_VLAN_TAG bit
 * with the device quiesced and the full lock held.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);
	int running = netif_running(dev);

	if (running)
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (running)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
#endif
9572
9573 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9574 {
9575         struct tg3 *tp = netdev_priv(dev);
9576
9577         memcpy(ec, &tp->coal, sizeof(*ec));
9578         return 0;
9579 }
9580
9581 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9582 {
9583         struct tg3 *tp = netdev_priv(dev);
9584         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9585         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9586
9587         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9588                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9589                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9590                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9591                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9592         }
9593
9594         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9595             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9596             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9597             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9598             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9599             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9600             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9601             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9602             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9603             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9604                 return -EINVAL;
9605
9606         /* No rx interrupts will be generated if both are zero */
9607         if ((ec->rx_coalesce_usecs == 0) &&
9608             (ec->rx_max_coalesced_frames == 0))
9609                 return -EINVAL;
9610
9611         /* No tx interrupts will be generated if both are zero */
9612         if ((ec->tx_coalesce_usecs == 0) &&
9613             (ec->tx_max_coalesced_frames == 0))
9614                 return -EINVAL;
9615
9616         /* Only copy relevant parameters, ignore all others. */
9617         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9618         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9619         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9620         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9621         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9622         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9623         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9624         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9625         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9626
9627         if (netif_running(dev)) {
9628                 tg3_full_lock(tp, 0);
9629                 __tg3_set_coalesce(tp, &tp->coal);
9630                 tg3_full_unlock(tp);
9631         }
9632         return 0;
9633 }
9634
/* ethtool entry points for the tg3 driver; installed on the net_device
 * so userspace ethtool can query/configure the NIC.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
9667
9668 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9669 {
9670         u32 cursize, val, magic;
9671
9672         tp->nvram_size = EEPROM_CHIP_SIZE;
9673
9674         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9675                 return;
9676
9677         if ((magic != TG3_EEPROM_MAGIC) &&
9678             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9679             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9680                 return;
9681
9682         /*
9683          * Size the chip by reading offsets at increasing powers of two.
9684          * When we encounter our validation signature, we know the addressing
9685          * has wrapped around, and thus have our chip size.
9686          */
9687         cursize = 0x10;
9688
9689         while (cursize < tp->nvram_size) {
9690                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9691                         return;
9692
9693                 if (val == magic)
9694                         break;
9695
9696                 cursize <<= 1;
9697         }
9698
9699         tp->nvram_size = cursize;
9700 }
9701
9702 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9703 {
9704         u32 val;
9705
9706         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9707                 return;
9708
9709         /* Selfboot format */
9710         if (val != TG3_EEPROM_MAGIC) {
9711                 tg3_get_eeprom_size(tp);
9712                 return;
9713         }
9714
9715         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9716                 if (val != 0) {
9717                         tp->nvram_size = (val >> 16) * 1024;
9718                         return;
9719                 }
9720         }
9721         tp->nvram_size = 0x80000;
9722 }
9723
9724 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9725 {
9726         u32 nvcfg1;
9727
9728         nvcfg1 = tr32(NVRAM_CFG1);
9729         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9730                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9731         }
9732         else {
9733                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9734                 tw32(NVRAM_CFG1, nvcfg1);
9735         }
9736
9737         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9738             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9739                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9740                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9741                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9742                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9743                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9744                                 break;
9745                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9746                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9747                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9748                                 break;
9749                         case FLASH_VENDOR_ATMEL_EEPROM:
9750                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9751                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9752                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9753                                 break;
9754                         case FLASH_VENDOR_ST:
9755                                 tp->nvram_jedecnum = JEDEC_ST;
9756                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9757                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9758                                 break;
9759                         case FLASH_VENDOR_SAIFUN:
9760                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9761                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9762                                 break;
9763                         case FLASH_VENDOR_SST_SMALL:
9764                         case FLASH_VENDOR_SST_LARGE:
9765                                 tp->nvram_jedecnum = JEDEC_SST;
9766                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9767                                 break;
9768                 }
9769         }
9770         else {
9771                 tp->nvram_jedecnum = JEDEC_ATMEL;
9772                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9773                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9774         }
9775 }
9776
9777 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9778 {
9779         u32 nvcfg1;
9780
9781         nvcfg1 = tr32(NVRAM_CFG1);
9782
9783         /* NVRAM protection for TPM */
9784         if (nvcfg1 & (1 << 27))
9785                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9786
9787         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9788                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9789                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9790                         tp->nvram_jedecnum = JEDEC_ATMEL;
9791                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9792                         break;
9793                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9794                         tp->nvram_jedecnum = JEDEC_ATMEL;
9795                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9796                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9797                         break;
9798                 case FLASH_5752VENDOR_ST_M45PE10:
9799                 case FLASH_5752VENDOR_ST_M45PE20:
9800                 case FLASH_5752VENDOR_ST_M45PE40:
9801                         tp->nvram_jedecnum = JEDEC_ST;
9802                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9803                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9804                         break;
9805         }
9806
9807         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9808                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9809                         case FLASH_5752PAGE_SIZE_256:
9810                                 tp->nvram_pagesize = 256;
9811                                 break;
9812                         case FLASH_5752PAGE_SIZE_512:
9813                                 tp->nvram_pagesize = 512;
9814                                 break;
9815                         case FLASH_5752PAGE_SIZE_1K:
9816                                 tp->nvram_pagesize = 1024;
9817                                 break;
9818                         case FLASH_5752PAGE_SIZE_2K:
9819                                 tp->nvram_pagesize = 2048;
9820                                 break;
9821                         case FLASH_5752PAGE_SIZE_4K:
9822                                 tp->nvram_pagesize = 4096;
9823                                 break;
9824                         case FLASH_5752PAGE_SIZE_264:
9825                                 tp->nvram_pagesize = 264;
9826                                 break;
9827                 }
9828         }
9829         else {
9830                 /* For eeprom, set pagesize to maximum eeprom size */
9831                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9832
9833                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9834                 tw32(NVRAM_CFG1, nvcfg1);
9835         }
9836 }
9837
9838 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9839 {
9840         u32 nvcfg1, protect = 0;
9841
9842         nvcfg1 = tr32(NVRAM_CFG1);
9843
9844         /* NVRAM protection for TPM */
9845         if (nvcfg1 & (1 << 27)) {
9846                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9847                 protect = 1;
9848         }
9849
9850         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9851         switch (nvcfg1) {
9852                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9853                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9854                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9855                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9856                         tp->nvram_jedecnum = JEDEC_ATMEL;
9857                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9858                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9859                         tp->nvram_pagesize = 264;
9860                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9861                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9862                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9863                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9864                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9865                         else
9866                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9867                         break;
9868                 case FLASH_5752VENDOR_ST_M45PE10:
9869                 case FLASH_5752VENDOR_ST_M45PE20:
9870                 case FLASH_5752VENDOR_ST_M45PE40:
9871                         tp->nvram_jedecnum = JEDEC_ST;
9872                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9873                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9874                         tp->nvram_pagesize = 256;
9875                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9876                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9877                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9878                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9879                         else
9880                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9881                         break;
9882         }
9883 }
9884
9885 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9886 {
9887         u32 nvcfg1;
9888
9889         nvcfg1 = tr32(NVRAM_CFG1);
9890
9891         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9892                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9893                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9894                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9895                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9896                         tp->nvram_jedecnum = JEDEC_ATMEL;
9897                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9898                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9899
9900                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9901                         tw32(NVRAM_CFG1, nvcfg1);
9902                         break;
9903                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9904                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9905                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9906                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9907                         tp->nvram_jedecnum = JEDEC_ATMEL;
9908                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9909                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9910                         tp->nvram_pagesize = 264;
9911                         break;
9912                 case FLASH_5752VENDOR_ST_M45PE10:
9913                 case FLASH_5752VENDOR_ST_M45PE20:
9914                 case FLASH_5752VENDOR_ST_M45PE40:
9915                         tp->nvram_jedecnum = JEDEC_ST;
9916                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9917                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9918                         tp->nvram_pagesize = 256;
9919                         break;
9920         }
9921 }
9922
9923 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9924 {
9925         u32 nvcfg1, protect = 0;
9926
9927         nvcfg1 = tr32(NVRAM_CFG1);
9928
9929         /* NVRAM protection for TPM */
9930         if (nvcfg1 & (1 << 27)) {
9931                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9932                 protect = 1;
9933         }
9934
9935         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9936         switch (nvcfg1) {
9937                 case FLASH_5761VENDOR_ATMEL_ADB021D:
9938                 case FLASH_5761VENDOR_ATMEL_ADB041D:
9939                 case FLASH_5761VENDOR_ATMEL_ADB081D:
9940                 case FLASH_5761VENDOR_ATMEL_ADB161D:
9941                 case FLASH_5761VENDOR_ATMEL_MDB021D:
9942                 case FLASH_5761VENDOR_ATMEL_MDB041D:
9943                 case FLASH_5761VENDOR_ATMEL_MDB081D:
9944                 case FLASH_5761VENDOR_ATMEL_MDB161D:
9945                         tp->nvram_jedecnum = JEDEC_ATMEL;
9946                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9947                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9948                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9949                         tp->nvram_pagesize = 256;
9950                         break;
9951                 case FLASH_5761VENDOR_ST_A_M45PE20:
9952                 case FLASH_5761VENDOR_ST_A_M45PE40:
9953                 case FLASH_5761VENDOR_ST_A_M45PE80:
9954                 case FLASH_5761VENDOR_ST_A_M45PE16:
9955                 case FLASH_5761VENDOR_ST_M_M45PE20:
9956                 case FLASH_5761VENDOR_ST_M_M45PE40:
9957                 case FLASH_5761VENDOR_ST_M_M45PE80:
9958                 case FLASH_5761VENDOR_ST_M_M45PE16:
9959                         tp->nvram_jedecnum = JEDEC_ST;
9960                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9961                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9962                         tp->nvram_pagesize = 256;
9963                         break;
9964         }
9965
9966         if (protect) {
9967                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9968         } else {
9969                 switch (nvcfg1) {
9970                         case FLASH_5761VENDOR_ATMEL_ADB161D:
9971                         case FLASH_5761VENDOR_ATMEL_MDB161D:
9972                         case FLASH_5761VENDOR_ST_A_M45PE16:
9973                         case FLASH_5761VENDOR_ST_M_M45PE16:
9974                                 tp->nvram_size = 0x100000;
9975                                 break;
9976                         case FLASH_5761VENDOR_ATMEL_ADB081D:
9977                         case FLASH_5761VENDOR_ATMEL_MDB081D:
9978                         case FLASH_5761VENDOR_ST_A_M45PE80:
9979                         case FLASH_5761VENDOR_ST_M_M45PE80:
9980                                 tp->nvram_size = 0x80000;
9981                                 break;
9982                         case FLASH_5761VENDOR_ATMEL_ADB041D:
9983                         case FLASH_5761VENDOR_ATMEL_MDB041D:
9984                         case FLASH_5761VENDOR_ST_A_M45PE40:
9985                         case FLASH_5761VENDOR_ST_M_M45PE40:
9986                                 tp->nvram_size = 0x40000;
9987                                 break;
9988                         case FLASH_5761VENDOR_ATMEL_ADB021D:
9989                         case FLASH_5761VENDOR_ATMEL_MDB021D:
9990                         case FLASH_5761VENDOR_ST_A_M45PE20:
9991                         case FLASH_5761VENDOR_ST_M_M45PE20:
9992                                 tp->nvram_size = 0x20000;
9993                                 break;
9994                 }
9995         }
9996 }
9997
9998 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9999 {
10000         tp->nvram_jedecnum = JEDEC_ATMEL;
10001         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10002         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10003 }
10004
10005 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10006 static void __devinit tg3_nvram_init(struct tg3 *tp)
10007 {
10008         tw32_f(GRC_EEPROM_ADDR,
10009              (EEPROM_ADDR_FSM_RESET |
10010               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10011                EEPROM_ADDR_CLKPERD_SHIFT)));
10012
10013         msleep(1);
10014
10015         /* Enable seeprom accesses. */
10016         tw32_f(GRC_LOCAL_CTRL,
10017              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10018         udelay(100);
10019
10020         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10021             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10022                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10023
10024                 if (tg3_nvram_lock(tp)) {
10025                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10026                                "tg3_nvram_init failed.\n", tp->dev->name);
10027                         return;
10028                 }
10029                 tg3_enable_nvram_access(tp);
10030
10031                 tp->nvram_size = 0;
10032
10033                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10034                         tg3_get_5752_nvram_info(tp);
10035                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10036                         tg3_get_5755_nvram_info(tp);
10037                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10038                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10039                         tg3_get_5787_nvram_info(tp);
10040                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10041                         tg3_get_5761_nvram_info(tp);
10042                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10043                         tg3_get_5906_nvram_info(tp);
10044                 else
10045                         tg3_get_nvram_info(tp);
10046
10047                 if (tp->nvram_size == 0)
10048                         tg3_get_nvram_size(tp);
10049
10050                 tg3_disable_nvram_access(tp);
10051                 tg3_nvram_unlock(tp);
10052
10053         } else {
10054                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10055
10056                 tg3_get_eeprom_size(tp);
10057         }
10058 }
10059
10060 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10061                                         u32 offset, u32 *val)
10062 {
10063         u32 tmp;
10064         int i;
10065
10066         if (offset > EEPROM_ADDR_ADDR_MASK ||
10067             (offset % 4) != 0)
10068                 return -EINVAL;
10069
10070         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10071                                         EEPROM_ADDR_DEVID_MASK |
10072                                         EEPROM_ADDR_READ);
10073         tw32(GRC_EEPROM_ADDR,
10074              tmp |
10075              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10076              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10077               EEPROM_ADDR_ADDR_MASK) |
10078              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10079
10080         for (i = 0; i < 1000; i++) {
10081                 tmp = tr32(GRC_EEPROM_ADDR);
10082
10083                 if (tmp & EEPROM_ADDR_COMPLETE)
10084                         break;
10085                 msleep(1);
10086         }
10087         if (!(tmp & EEPROM_ADDR_COMPLETE))
10088                 return -EBUSY;
10089
10090         *val = tr32(GRC_EEPROM_DATA);
10091         return 0;
10092 }
10093
10094 #define NVRAM_CMD_TIMEOUT 10000
10095
10096 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10097 {
10098         int i;
10099
10100         tw32(NVRAM_CMD, nvram_cmd);
10101         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10102                 udelay(10);
10103                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10104                         udelay(10);
10105                         break;
10106                 }
10107         }
10108         if (i == NVRAM_CMD_TIMEOUT) {
10109                 return -EBUSY;
10110         }
10111         return 0;
10112 }
10113
10114 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10115 {
10116         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10117             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10118             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10119            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10120             (tp->nvram_jedecnum == JEDEC_ATMEL))
10121
10122                 addr = ((addr / tp->nvram_pagesize) <<
10123                         ATMEL_AT45DB0X1B_PAGE_POS) +
10124                        (addr % tp->nvram_pagesize);
10125
10126         return addr;
10127 }
10128
10129 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10130 {
10131         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10132             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10133             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10134            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10135             (tp->nvram_jedecnum == JEDEC_ATMEL))
10136
10137                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10138                         tp->nvram_pagesize) +
10139                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10140
10141         return addr;
10142 }
10143
10144 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10145 {
10146         int ret;
10147
10148         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10149                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10150
10151         offset = tg3_nvram_phys_addr(tp, offset);
10152
10153         if (offset > NVRAM_ADDR_MSK)
10154                 return -EINVAL;
10155
10156         ret = tg3_nvram_lock(tp);
10157         if (ret)
10158                 return ret;
10159
10160         tg3_enable_nvram_access(tp);
10161
10162         tw32(NVRAM_ADDR, offset);
10163         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10164                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10165
10166         if (ret == 0)
10167                 *val = swab32(tr32(NVRAM_RDDATA));
10168
10169         tg3_disable_nvram_access(tp);
10170
10171         tg3_nvram_unlock(tp);
10172
10173         return ret;
10174 }
10175
10176 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10177 {
10178         int err;
10179         u32 tmp;
10180
10181         err = tg3_nvram_read(tp, offset, &tmp);
10182         *val = swab32(tmp);
10183         return err;
10184 }
10185
10186 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10187                                     u32 offset, u32 len, u8 *buf)
10188 {
10189         int i, j, rc = 0;
10190         u32 val;
10191
10192         for (i = 0; i < len; i += 4) {
10193                 u32 addr, data;
10194
10195                 addr = offset + i;
10196
10197                 memcpy(&data, buf + i, 4);
10198
10199                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
10200
10201                 val = tr32(GRC_EEPROM_ADDR);
10202                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10203
10204                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10205                         EEPROM_ADDR_READ);
10206                 tw32(GRC_EEPROM_ADDR, val |
10207                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
10208                         (addr & EEPROM_ADDR_ADDR_MASK) |
10209                         EEPROM_ADDR_START |
10210                         EEPROM_ADDR_WRITE);
10211
10212                 for (j = 0; j < 1000; j++) {
10213                         val = tr32(GRC_EEPROM_ADDR);
10214
10215                         if (val & EEPROM_ADDR_COMPLETE)
10216                                 break;
10217                         msleep(1);
10218                 }
10219                 if (!(val & EEPROM_ADDR_COMPLETE)) {
10220                         rc = -EBUSY;
10221                         break;
10222                 }
10223         }
10224
10225         return rc;
10226 }
10227
/* Write 'len' bytes from 'buf' to unbuffered (page-erase) flash at
 * 'offset', using a read-modify-erase-write cycle for each affected
 * page.  offset and length are dword aligned.  Returns 0 on success,
 * -ENOMEM, or the error propagated from tg3_nvram_read() /
 * tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page image. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the flash page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read the entire target page so that bytes we are not
		 * rewriting survive the page erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data into the page image; 'size' is
		 * how much of this page the caller actually covers.
		 */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back one dword at a time, marking
		 * the first and last dwords of the page for the controller.
		 */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert write-disable regardless of success or failure. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10323
/* Write 'len' bytes from 'buf' to buffered flash (or plain eeprom) at
 * 'offset'.  offset and length are dword aligned.  Returns 0 on
 * success or the error from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		/* Offset of this dword within its flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark page boundaries and the ends of the overall
		 * transfer -- mirrors the framing done in the
		 * unbuffered write path.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST-manufactured parts, except on the ASIC revisions
		 * listed here, need an explicit write-enable command
		 * before each FIRST-flagged burst.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10376
10377 /* offset and length are dword aligned */
10378 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10379 {
10380         int ret;
10381
10382         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10383                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10384                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10385                 udelay(40);
10386         }
10387
10388         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10389                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10390         }
10391         else {
10392                 u32 grc_mode;
10393
10394                 ret = tg3_nvram_lock(tp);
10395                 if (ret)
10396                         return ret;
10397
10398                 tg3_enable_nvram_access(tp);
10399                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10400                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10401                         tw32(NVRAM_WRITE1, 0x406);
10402
10403                 grc_mode = tr32(GRC_MODE);
10404                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10405
10406                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10407                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10408
10409                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10410                                 buf);
10411                 }
10412                 else {
10413                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10414                                 buf);
10415                 }
10416
10417                 grc_mode = tr32(GRC_MODE);
10418                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10419
10420                 tg3_disable_nvram_access(tp);
10421                 tg3_nvram_unlock(tp);
10422         }
10423
10424         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10425                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10426                 udelay(40);
10427         }
10428
10429         return ret;
10430 }
10431
/* One entry of the board table below: maps a PCI subsystem
 * (vendor, device) pair to the PHY populated on that board design.
 * A phy_id of 0 is treated as a serdes board by tg3_phy_probe().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10436
/* Hardcoded subsystem-ID -> PHY table, used as a last resort when
 * neither the live PHY registers nor the NVRAM config yield a usable
 * PHY ID (see tg3_phy_probe()).
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10474
10475 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10476 {
10477         int i;
10478
10479         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10480                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10481                      tp->pdev->subsystem_vendor) &&
10482                     (subsys_id_to_phy_id[i].subsys_devid ==
10483                      tp->pdev->subsystem_device))
10484                         return &subsys_id_to_phy_id[i];
10485         }
10486         return NULL;
10487 }
10488
10489 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10490 {
10491         u32 val;
10492         u16 pmcsr;
10493
10494         /* On some early chips the SRAM cannot be accessed in D3hot state,
10495          * so need make sure we're in D0.
10496          */
10497         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10498         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10499         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10500         msleep(1);
10501
10502         /* Make sure register accesses (indirect or otherwise)
10503          * will function correctly.
10504          */
10505         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10506                                tp->misc_host_ctrl);
10507
10508         /* The memory arbiter has to be enabled in order for SRAM accesses
10509          * to succeed.  Normally on powerup the tg3 chip firmware will make
10510          * sure it is enabled, but other entities such as system netboot
10511          * code might disable it.
10512          */
10513         val = tr32(MEMARB_MODE);
10514         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10515
10516         tp->phy_id = PHY_ID_INVALID;
10517         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10518
10519         /* Assume an onboard device and WOL capable by default.  */
10520         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10521
10522         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10523                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10524                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10525                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10526                 }
10527                 val = tr32(VCPU_CFGSHDW);
10528                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10529                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10530                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10531                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10532                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10533                 return;
10534         }
10535
10536         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10537         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10538                 u32 nic_cfg, led_cfg;
10539                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10540                 int eeprom_phy_serdes = 0;
10541
10542                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10543                 tp->nic_sram_data_cfg = nic_cfg;
10544
10545                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10546                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10547                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10548                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10549                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10550                     (ver > 0) && (ver < 0x100))
10551                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10552
10553                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10554                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10555                         eeprom_phy_serdes = 1;
10556
10557                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10558                 if (nic_phy_id != 0) {
10559                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10560                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10561
10562                         eeprom_phy_id  = (id1 >> 16) << 10;
10563                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10564                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10565                 } else
10566                         eeprom_phy_id = 0;
10567
10568                 tp->phy_id = eeprom_phy_id;
10569                 if (eeprom_phy_serdes) {
10570                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10571                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10572                         else
10573                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10574                 }
10575
10576                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10577                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10578                                     SHASTA_EXT_LED_MODE_MASK);
10579                 else
10580                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10581
10582                 switch (led_cfg) {
10583                 default:
10584                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10585                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10586                         break;
10587
10588                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10589                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10590                         break;
10591
10592                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10593                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10594
10595                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10596                          * read on some older 5700/5701 bootcode.
10597                          */
10598                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10599                             ASIC_REV_5700 ||
10600                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10601                             ASIC_REV_5701)
10602                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10603
10604                         break;
10605
10606                 case SHASTA_EXT_LED_SHARED:
10607                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10608                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10609                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10610                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10611                                                  LED_CTRL_MODE_PHY_2);
10612                         break;
10613
10614                 case SHASTA_EXT_LED_MAC:
10615                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10616                         break;
10617
10618                 case SHASTA_EXT_LED_COMBO:
10619                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10620                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10621                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10622                                                  LED_CTRL_MODE_PHY_2);
10623                         break;
10624
10625                 };
10626
10627                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10628                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10629                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10630                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10631
10632                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0)
10633                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10634
10635                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10636                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10637                         if ((tp->pdev->subsystem_vendor ==
10638                              PCI_VENDOR_ID_ARIMA) &&
10639                             (tp->pdev->subsystem_device == 0x205a ||
10640                              tp->pdev->subsystem_device == 0x2063))
10641                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10642                 } else {
10643                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10644                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10645                 }
10646
10647                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10648                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10649                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10650                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10651                 }
10652                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10653                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10654                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10655                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10656                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10657
10658                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10659                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10660                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10661
10662                 if (cfg2 & (1 << 17))
10663                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10664
10665                 /* serdes signal pre-emphasis in register 0x590 set by */
10666                 /* bootcode if bit 18 is set */
10667                 if (cfg2 & (1 << 18))
10668                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10669
10670                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10671                         u32 cfg3;
10672
10673                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10674                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10675                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10676                 }
10677         }
10678 }
10679
/* Determine which PHY is attached: prefer the live MII PHY ID
 * registers, then the PHY ID already set from NVRAM by
 * tg3_get_eeprom_hw_cfg(), then the hardcoded subsystem-ID table.
 * On copper boards not managed by ASF/APE firmware, also resets the
 * PHY and programs the autoneg advertisement.  Returns 0 or a
 * negative error.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack (PHYSID1, PHYSID2) into the driver's internal
		 * PHY_ID format -- same packing as used for the NVRAM
		 * PHY ID in tg3_get_eeprom_hw_cfg().
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A table phy_id of 0 denotes a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice -- presumably to refresh the
		 * latched link-status bit before testing it.
		 * Skip the reset entirely if link is already up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Restart autoneg only if the PHY is not already
		 * advertising everything we want.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this re-runs the 5401 DSP init done just above
	 * (err is necessarily 0 here, so the condition reduces to the
	 * PHY check).  Looks redundant -- confirm whether the double
	 * init is intentional before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Serdes boards advertise fiber modes only. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10807
/* Extract the board part number from the VPD area into
 * tp->board_part_number.  The raw 256-byte VPD image is fetched either
 * directly from NVRAM (valid EEPROM magic) or via the PCI VPD
 * capability, then scanned for the read-only tag's "PN" keyword.
 * Falls back to a fixed string on any failure.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100; unpack each dword
		 * into bytes, LSB first.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No NVRAM image -- read through the PCI VPD capability:
		 * write the address, poll for the completion flag
		 * (bit 15), then read the data dword.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			/* NOTE(review): assigning cpu_to_le32() back into
			 * a plain u32 -- byte-swap bookkeeping; verify
			 * against the NVRAM path on big-endian.
			 */
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip the identifier-string (0x82) and read-write
		 * (0x91) resources; each carries a 16-bit LE length.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything other than the read-only resource (0x90)
		 * here means the VPD image is not usable.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword/length/value triples looking for "PN". */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10907
10908 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10909 {
10910         u32 val;
10911
10912         if (tg3_nvram_read_swab(tp, offset, &val) ||
10913             (val & 0xfc000000) != 0x0c000000 ||
10914             tg3_nvram_read_swab(tp, offset + 4, &val) ||
10915             val != 0)
10916                 return 0;
10917
10918         return 1;
10919 }
10920
/* Build the firmware version string in tp->fw_ver from NVRAM: first the
 * bootcode version string, then -- when ASF firmware is enabled and no
 * APE is present -- ", " followed by the ASF initialization image's
 * version words found via the NVM directory.  Silently leaves fw_ver
 * partially filled on any NVRAM read failure.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Bail out unless the NVRAM carries the expected magic. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0xc: bootcode image offset; word 0x4: its load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Word 8 of a valid image points at its version string. */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Copy 16 bytes of the bootcode version string. */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		if (tg3_nvram_read(tp, offset + i, &val))
			return;

		val = le32_to_cpu(val);
		memcpy(tp->fw_ver + i, &val, 4);
	}

	/* The ASF version suffix only applies when ASF firmware is
	 * enabled and not superseded by an APE.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Scan the NVM directory for the ASF init image entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Load address: fixed on pre-5705 chips, otherwise read from
	 * the preceding directory word.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	/* Append up to 16 bytes of ASF version, truncated to fit
	 * TG3_VER_SIZE (the final byte is reserved for the NUL).
	 */
	for (i = 0; i < 4; i++) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		val = le32_to_cpu(val);
		offset += sizeof(val);

		if (bcnt > TG3_VER_SIZE - sizeof(val)) {
			memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
		bcnt += sizeof(val);
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11004
11005 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11006
11007 static int __devinit tg3_get_invariants(struct tg3 *tp)
11008 {
11009         static struct pci_device_id write_reorder_chipsets[] = {
11010                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11011                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11012                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11013                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11014                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11015                              PCI_DEVICE_ID_VIA_8385_0) },
11016                 { },
11017         };
11018         u32 misc_ctrl_reg;
11019         u32 cacheline_sz_reg;
11020         u32 pci_state_reg, grc_misc_cfg;
11021         u32 val;
11022         u16 pci_cmd;
11023         int err, pcie_cap;
11024
11025         /* Force memory write invalidate off.  If we leave it on,
11026          * then on 5700_BX chips we have to enable a workaround.
11027          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11028          * to match the cacheline size.  The Broadcom driver have this
11029          * workaround but turns MWI off all the times so never uses
11030          * it.  This seems to suggest that the workaround is insufficient.
11031          */
11032         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11033         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11034         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11035
11036         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11037          * has the register indirect write enable bit set before
11038          * we try to access any of the MMIO registers.  It is also
11039          * critical that the PCI-X hw workaround situation is decided
11040          * before that as well.
11041          */
11042         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11043                               &misc_ctrl_reg);
11044
11045         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11046                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11048                 u32 prod_id_asic_rev;
11049
11050                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11051                                       &prod_id_asic_rev);
11052                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11053         }
11054
11055         /* Wrong chip ID in 5752 A0. This code can be removed later
11056          * as A0 is not in production.
11057          */
11058         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11059                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11060
11061         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11062          * we need to disable memory and use config. cycles
11063          * only to access all registers. The 5702/03 chips
11064          * can mistakenly decode the special cycles from the
11065          * ICH chipsets as memory write cycles, causing corruption
11066          * of register and memory space. Only certain ICH bridges
11067          * will drive special cycles with non-zero data during the
11068          * address phase which can fall within the 5703's address
11069          * range. This is not an ICH bug as the PCI spec allows
11070          * non-zero address during special cycles. However, only
11071          * these ICH bridges are known to drive non-zero addresses
11072          * during special cycles.
11073          *
11074          * Since special cycles do not cross PCI bridges, we only
11075          * enable this workaround if the 5703 is on the secondary
11076          * bus of these ICH bridges.
11077          */
11078         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11079             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11080                 static struct tg3_dev_id {
11081                         u32     vendor;
11082                         u32     device;
11083                         u32     rev;
11084                 } ich_chipsets[] = {
11085                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11086                           PCI_ANY_ID },
11087                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11088                           PCI_ANY_ID },
11089                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11090                           0xa },
11091                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11092                           PCI_ANY_ID },
11093                         { },
11094                 };
11095                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11096                 struct pci_dev *bridge = NULL;
11097
11098                 while (pci_id->vendor != 0) {
11099                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11100                                                 bridge);
11101                         if (!bridge) {
11102                                 pci_id++;
11103                                 continue;
11104                         }
11105                         if (pci_id->rev != PCI_ANY_ID) {
11106                                 if (bridge->revision > pci_id->rev)
11107                                         continue;
11108                         }
11109                         if (bridge->subordinate &&
11110                             (bridge->subordinate->number ==
11111                              tp->pdev->bus->number)) {
11112
11113                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11114                                 pci_dev_put(bridge);
11115                                 break;
11116                         }
11117                 }
11118         }
11119
11120         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11121          * DMA addresses > 40-bit. This bridge may have other additional
11122          * 57xx devices behind it in some 4-port NIC designs for example.
11123          * Any tg3 device found behind the bridge will also need the 40-bit
11124          * DMA workaround.
11125          */
11126         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11127             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11128                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11129                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11130                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11131         }
11132         else {
11133                 struct pci_dev *bridge = NULL;
11134
11135                 do {
11136                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11137                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11138                                                 bridge);
11139                         if (bridge && bridge->subordinate &&
11140                             (bridge->subordinate->number <=
11141                              tp->pdev->bus->number) &&
11142                             (bridge->subordinate->subordinate >=
11143                              tp->pdev->bus->number)) {
11144                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11145                                 pci_dev_put(bridge);
11146                                 break;
11147                         }
11148                 } while (bridge);
11149         }
11150
11151         /* Initialize misc host control in PCI block. */
11152         tp->misc_host_ctrl |= (misc_ctrl_reg &
11153                                MISC_HOST_CTRL_CHIPREV);
11154         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11155                                tp->misc_host_ctrl);
11156
11157         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11158                               &cacheline_sz_reg);
11159
11160         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11161         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11162         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11163         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11164
11165         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11166             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11167                 tp->pdev_peer = tg3_find_peer(tp);
11168
11169         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11170             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11171             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11172             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11173             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11174             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11175             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11176             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11177                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11178
11179         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11180             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11181                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11182
11183         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11184                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11185                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11186                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11187                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11188                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11189                      tp->pdev_peer == tp->pdev))
11190                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11191
11192                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11193                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11194                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11195                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11196                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11197                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11198                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11199                 } else {
11200                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11201                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11202                                 ASIC_REV_5750 &&
11203                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11204                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11205                 }
11206         }
11207
11208         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11209             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11210             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11211             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11212             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11213             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11214             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11215             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11216                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11217
11218         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11219         if (pcie_cap != 0) {
11220                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11221
11222                 pcie_set_readrq(tp->pdev, 4096);
11223
11224                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11225                         u16 lnkctl;
11226
11227                         pci_read_config_word(tp->pdev,
11228                                              pcie_cap + PCI_EXP_LNKCTL,
11229                                              &lnkctl);
11230                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11231                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11232                 }
11233         }
11234
11235         /* If we have an AMD 762 or VIA K8T800 chipset, write
11236          * reordering to the mailbox registers done by the host
11237          * controller can cause major troubles.  We read back from
11238          * every mailbox register write to force the writes to be
11239          * posted to the chip in order.
11240          */
11241         if (pci_dev_present(write_reorder_chipsets) &&
11242             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11243                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11244
11245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11246             tp->pci_lat_timer < 64) {
11247                 tp->pci_lat_timer = 64;
11248
11249                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11250                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11251                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11252                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11253
11254                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11255                                        cacheline_sz_reg);
11256         }
11257
11258         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11259             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11260                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11261                 if (!tp->pcix_cap) {
11262                         printk(KERN_ERR PFX "Cannot find PCI-X "
11263                                             "capability, aborting.\n");
11264                         return -EIO;
11265                 }
11266         }
11267
11268         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11269                               &pci_state_reg);
11270
11271         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11272                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11273
11274                 /* If this is a 5700 BX chipset, and we are in PCI-X
11275                  * mode, enable register write workaround.
11276                  *
11277                  * The workaround is to use indirect register accesses
11278                  * for all chip writes not to mailbox registers.
11279                  */
11280                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11281                         u32 pm_reg;
11282
11283                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11284
11285                         /* The chip can have it's power management PCI config
11286                          * space registers clobbered due to this bug.
11287                          * So explicitly force the chip into D0 here.
11288                          */
11289                         pci_read_config_dword(tp->pdev,
11290                                               tp->pm_cap + PCI_PM_CTRL,
11291                                               &pm_reg);
11292                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11293                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11294                         pci_write_config_dword(tp->pdev,
11295                                                tp->pm_cap + PCI_PM_CTRL,
11296                                                pm_reg);
11297
11298                         /* Also, force SERR#/PERR# in PCI command. */
11299                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11300                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11301                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11302                 }
11303         }
11304
11305         /* 5700 BX chips need to have their TX producer index mailboxes
11306          * written twice to workaround a bug.
11307          */
11308         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11309                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11310
11311         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11312                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11313         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11314                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11315
11316         /* Chip-specific fixup from Broadcom driver */
11317         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11318             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11319                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11320                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11321         }
11322
11323         /* Default fast path register access methods */
11324         tp->read32 = tg3_read32;
11325         tp->write32 = tg3_write32;
11326         tp->read32_mbox = tg3_read32;
11327         tp->write32_mbox = tg3_write32;
11328         tp->write32_tx_mbox = tg3_write32;
11329         tp->write32_rx_mbox = tg3_write32;
11330
11331         /* Various workaround register access methods */
11332         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11333                 tp->write32 = tg3_write_indirect_reg32;
11334         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11335                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11336                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11337                 /*
11338                  * Back to back register writes can cause problems on these
11339                  * chips, the workaround is to read back all reg writes
11340                  * except those to mailbox regs.
11341                  *
11342                  * See tg3_write_indirect_reg32().
11343                  */
11344                 tp->write32 = tg3_write_flush_reg32;
11345         }
11346
11347
11348         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11349             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11350                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11351                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11352                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11353         }
11354
11355         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11356                 tp->read32 = tg3_read_indirect_reg32;
11357                 tp->write32 = tg3_write_indirect_reg32;
11358                 tp->read32_mbox = tg3_read_indirect_mbox;
11359                 tp->write32_mbox = tg3_write_indirect_mbox;
11360                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11361                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11362
11363                 iounmap(tp->regs);
11364                 tp->regs = NULL;
11365
11366                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11367                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11368                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11369         }
11370         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11371                 tp->read32_mbox = tg3_read32_mbox_5906;
11372                 tp->write32_mbox = tg3_write32_mbox_5906;
11373                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11374                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11375         }
11376
11377         if (tp->write32 == tg3_write_indirect_reg32 ||
11378             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11379              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11380               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11381                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11382
11383         /* Get eeprom hw config before calling tg3_set_power_state().
11384          * In particular, the TG3_FLG2_IS_NIC flag must be
11385          * determined before calling tg3_set_power_state() so that
11386          * we know whether or not to switch out of Vaux power.
11387          * When the flag is set, it means that GPIO1 is used for eeprom
11388          * write protect and also implies that it is a LOM where GPIOs
11389          * are not used to switch power.
11390          */
11391         tg3_get_eeprom_hw_cfg(tp);
11392
11393         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11394                 /* Allow reads and writes to the
11395                  * APE register and memory space.
11396                  */
11397                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11398                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11399                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11400                                        pci_state_reg);
11401         }
11402
11403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11405                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11406
11407         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11408          * GPIO1 driven high will bring 5700's external PHY out of reset.
11409          * It is also used as eeprom write protect on LOMs.
11410          */
11411         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11412         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11413             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11414                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11415                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11416         /* Unused GPIO3 must be driven as output on 5752 because there
11417          * are no pull-up resistors on unused GPIO pins.
11418          */
11419         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11420                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11421
11422         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11423                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11424
11425         /* Force the chip into D0. */
11426         err = tg3_set_power_state(tp, PCI_D0);
11427         if (err) {
11428                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11429                        pci_name(tp->pdev));
11430                 return err;
11431         }
11432
11433         /* 5700 B0 chips do not support checksumming correctly due
11434          * to hardware bugs.
11435          */
11436         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11437                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11438
11439         /* Derive initial jumbo mode from MTU assigned in
11440          * ether_setup() via the alloc_etherdev() call
11441          */
11442         if (tp->dev->mtu > ETH_DATA_LEN &&
11443             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11444                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11445
11446         /* Determine WakeOnLan speed to use. */
11447         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11448             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11449             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11450             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11451                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11452         } else {
11453                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11454         }
11455
11456         /* A few boards don't want Ethernet@WireSpeed phy feature */
11457         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11458             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11459              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11460              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11461             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11462             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11463                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11464
11465         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11466             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11467                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11468         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11469                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11470
11471         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11472                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11473                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11474                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11475                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11476                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11477                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11478                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11479                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11480                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11481                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11482                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11483         }
11484
11485         tp->coalesce_mode = 0;
11486         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11487             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11488                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11489
11490         /* Initialize MAC MI mode, polling disabled. */
11491         tw32_f(MAC_MI_MODE, tp->mi_mode);
11492         udelay(80);
11493
11494         /* Initialize data/descriptor byte/word swapping. */
11495         val = tr32(GRC_MODE);
11496         val &= GRC_MODE_HOST_STACKUP;
11497         tw32(GRC_MODE, val | tp->grc_mode);
11498
11499         tg3_switch_clocks(tp);
11500
11501         /* Clear this out for sanity. */
11502         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11503
11504         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11505                               &pci_state_reg);
11506         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11507             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11508                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11509
11510                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11511                     chiprevid == CHIPREV_ID_5701_B0 ||
11512                     chiprevid == CHIPREV_ID_5701_B2 ||
11513                     chiprevid == CHIPREV_ID_5701_B5) {
11514                         void __iomem *sram_base;
11515
11516                         /* Write some dummy words into the SRAM status block
11517                          * area, see if it reads back correctly.  If the return
11518                          * value is bad, force enable the PCIX workaround.
11519                          */
11520                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11521
11522                         writel(0x00000000, sram_base);
11523                         writel(0x00000000, sram_base + 4);
11524                         writel(0xffffffff, sram_base + 4);
11525                         if (readl(sram_base) != 0x00000000)
11526                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11527                 }
11528         }
11529
11530         udelay(50);
11531         tg3_nvram_init(tp);
11532
11533         grc_misc_cfg = tr32(GRC_MISC_CFG);
11534         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11535
11536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11537             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11538              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11539                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11540
11541         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11542             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11543                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11544         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11545                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11546                                       HOSTCC_MODE_CLRTICK_TXBD);
11547
11548                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11549                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11550                                        tp->misc_host_ctrl);
11551         }
11552
11553         /* these are limited to 10/100 only */
11554         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11555              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11556             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11557              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11558              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11559               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11560               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11561             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11562              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11563               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11564               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11565             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11566                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11567
11568         err = tg3_phy_probe(tp);
11569         if (err) {
11570                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11571                        pci_name(tp->pdev), err);
11572                 /* ... but do not return immediately ... */
11573         }
11574
11575         tg3_read_partno(tp);
11576         tg3_read_fw_ver(tp);
11577
11578         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11579                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11580         } else {
11581                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11582                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11583                 else
11584                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11585         }
11586
11587         /* 5700 {AX,BX} chips have a broken status block link
11588          * change bit implementation, so we must use the
11589          * status register in those cases.
11590          */
11591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11592                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11593         else
11594                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11595
11596         /* The led_ctrl is set during tg3_phy_probe, here we might
11597          * have to force the link status polling mechanism based
11598          * upon subsystem IDs.
11599          */
11600         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11601             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11602             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11603                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11604                                   TG3_FLAG_USE_LINKCHG_REG);
11605         }
11606
11607         /* For all SERDES we poll the MAC status register. */
11608         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11609                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11610         else
11611                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11612
11613         /* All chips before 5787 can get confused if TX buffers
11614          * straddle the 4GB address boundary in some cases.
11615          */
11616         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11617             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11618             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11619             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11620             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11621                 tp->dev->hard_start_xmit = tg3_start_xmit;
11622         else
11623                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11624
11625         tp->rx_offset = 2;
11626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11627             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11628                 tp->rx_offset = 0;
11629
11630         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11631
11632         /* Increment the rx prod index on the rx std ring by at most
11633          * 8 for these chips to workaround hw errata.
11634          */
11635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11638                 tp->rx_std_max_post = 8;
11639
11640         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11641                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11642                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11643
11644         return err;
11645 }
11646
11647 #ifdef CONFIG_SPARC
11648 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11649 {
11650         struct net_device *dev = tp->dev;
11651         struct pci_dev *pdev = tp->pdev;
11652         struct device_node *dp = pci_device_to_OF_node(pdev);
11653         const unsigned char *addr;
11654         int len;
11655
11656         addr = of_get_property(dp, "local-mac-address", &len);
11657         if (addr && len == 6) {
11658                 memcpy(dev->dev_addr, addr, 6);
11659                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11660                 return 0;
11661         }
11662         return -ENODEV;
11663 }
11664
11665 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11666 {
11667         struct net_device *dev = tp->dev;
11668
11669         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11670         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11671         return 0;
11672 }
11673 #endif
11674
11675 static int __devinit tg3_get_device_address(struct tg3 *tp)
11676 {
11677         struct net_device *dev = tp->dev;
11678         u32 hi, lo, mac_offset;
11679         int addr_ok = 0;
11680
11681 #ifdef CONFIG_SPARC
11682         if (!tg3_get_macaddr_sparc(tp))
11683                 return 0;
11684 #endif
11685
11686         mac_offset = 0x7c;
11687         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11688             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11689                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11690                         mac_offset = 0xcc;
11691                 if (tg3_nvram_lock(tp))
11692                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11693                 else
11694                         tg3_nvram_unlock(tp);
11695         }
11696         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11697                 mac_offset = 0x10;
11698
11699         /* First try to get it from MAC address mailbox. */
11700         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11701         if ((hi >> 16) == 0x484b) {
11702                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11703                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11704
11705                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11706                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11707                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11708                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11709                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11710
11711                 /* Some old bootcode may report a 0 MAC address in SRAM */
11712                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11713         }
11714         if (!addr_ok) {
11715                 /* Next, try NVRAM. */
11716                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11717                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11718                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11719                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11720                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11721                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11722                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11723                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11724                 }
11725                 /* Finally just fetch it out of the MAC control regs. */
11726                 else {
11727                         hi = tr32(MAC_ADDR_0_HIGH);
11728                         lo = tr32(MAC_ADDR_0_LOW);
11729
11730                         dev->dev_addr[5] = lo & 0xff;
11731                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11732                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11733                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11734                         dev->dev_addr[1] = hi & 0xff;
11735                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11736                 }
11737         }
11738
11739         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11740 #ifdef CONFIG_SPARC64
11741                 if (!tg3_get_default_macaddr_sparc(tp))
11742                         return 0;
11743 #endif
11744                 return -EINVAL;
11745         }
11746         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11747         return 0;
11748 }
11749
11750 #define BOUNDARY_SINGLE_CACHELINE       1
11751 #define BOUNDARY_MULTI_CACHELINE        2
11752
11753 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11754 {
11755         int cacheline_size;
11756         u8 byte;
11757         int goal;
11758
11759         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11760         if (byte == 0)
11761                 cacheline_size = 1024;
11762         else
11763                 cacheline_size = (int) byte * 4;
11764
11765         /* On 5703 and later chips, the boundary bits have no
11766          * effect.
11767          */
11768         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11769             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11770             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11771                 goto out;
11772
11773 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11774         goal = BOUNDARY_MULTI_CACHELINE;
11775 #else
11776 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11777         goal = BOUNDARY_SINGLE_CACHELINE;
11778 #else
11779         goal = 0;
11780 #endif
11781 #endif
11782
11783         if (!goal)
11784                 goto out;
11785
11786         /* PCI controllers on most RISC systems tend to disconnect
11787          * when a device tries to burst across a cache-line boundary.
11788          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11789          *
11790          * Unfortunately, for PCI-E there are only limited
11791          * write-side controls for this, and thus for reads
11792          * we will still get the disconnects.  We'll also waste
11793          * these PCI cycles for both read and write for chips
11794          * other than 5700 and 5701 which do not implement the
11795          * boundary bits.
11796          */
11797         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11798             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11799                 switch (cacheline_size) {
11800                 case 16:
11801                 case 32:
11802                 case 64:
11803                 case 128:
11804                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11805                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11806                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11807                         } else {
11808                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11809                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11810                         }
11811                         break;
11812
11813                 case 256:
11814                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11815                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11816                         break;
11817
11818                 default:
11819                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11820                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11821                         break;
11822                 };
11823         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11824                 switch (cacheline_size) {
11825                 case 16:
11826                 case 32:
11827                 case 64:
11828                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11829                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11830                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11831                                 break;
11832                         }
11833                         /* fallthrough */
11834                 case 128:
11835                 default:
11836                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11837                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11838                         break;
11839                 };
11840         } else {
11841                 switch (cacheline_size) {
11842                 case 16:
11843                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11844                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11845                                         DMA_RWCTRL_WRITE_BNDRY_16);
11846                                 break;
11847                         }
11848                         /* fallthrough */
11849                 case 32:
11850                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11851                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11852                                         DMA_RWCTRL_WRITE_BNDRY_32);
11853                                 break;
11854                         }
11855                         /* fallthrough */
11856                 case 64:
11857                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11858                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11859                                         DMA_RWCTRL_WRITE_BNDRY_64);
11860                                 break;
11861                         }
11862                         /* fallthrough */
11863                 case 128:
11864                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11865                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11866                                         DMA_RWCTRL_WRITE_BNDRY_128);
11867                                 break;
11868                         }
11869                         /* fallthrough */
11870                 case 256:
11871                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11872                                 DMA_RWCTRL_WRITE_BNDRY_256);
11873                         break;
11874                 case 512:
11875                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11876                                 DMA_RWCTRL_WRITE_BNDRY_512);
11877                         break;
11878                 case 1024:
11879                 default:
11880                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11881                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11882                         break;
11883                 };
11884         }
11885
11886 out:
11887         return val;
11888 }
11889
/* Run one raw transfer through the chip's internal DMA engine for the
 * probe-time DMA selftest.
 *
 * A single internal buffer descriptor pointing at @buf/@buf_dma of
 * @size bytes is built in host memory, copied word by word into NIC
 * SRAM through the PCI memory window, and queued on the read-DMA FIFO
 * (@to_device != 0, host -> NIC) or the write-DMA FIFO (@to_device ==
 * 0, NIC -> host).  We then poll the matching completion FIFO until it
 * echoes the descriptor's SRAM address.
 *
 * Returns 0 on completion, -ENODEV if the descriptor does not complete
 * within the polling window (40 polls, 100us apart).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear the completion FIFOs and DMA status before starting. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Describe the host buffer; 0x2100 is the NIC-internal buffer
         * address the data is staged at (see the readback check in
         * tg3_test_dma()). */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* Completion/source queue IDs for the read-DMA path. */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* Completion/source queue IDs for the write-DMA path. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * through the PCI memory window registers. */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the transfer by enqueueing the descriptor address. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll for completion: the completion FIFO's low 16 bits echo
         * the descriptor address when the transfer finishes. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
11970
11971 #define TEST_BUFFER_SIZE        0x2000
11972
/* Probe-time DMA configuration and selftest.
 *
 * Chooses the initial TG3PCI_DMA_RW_CTRL value (PCI command codes,
 * watermarks, boundary bits and per-ASIC workarounds) and, on 5700/
 * 5701 only, runs a host<->NIC DMA round trip with the maximum write
 * burst to detect the 5700/5701 write-DMA corruption bug; if data
 * comes back corrupted, the write boundary is clamped to 16 bytes and
 * the test is retried.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or a negative error from the DMA test itself.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret;

        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Base value: PCI write/read command codes. */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        /* Bus- and ASIC-specific watermark/workaround bits.  The magic
         * constants are chip-specific register fields from Broadcom. */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        /* 5703/5704: clear the low nibble (boundary bits have a
         * different meaning on these chips). */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 need the actual round-trip DMA test. */
        ret = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        while (1) {
                u32 *p = buf, i;

                /* Fill the buffer with a known pattern (p[i] == i). */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        /* On the first corruption, clamp the write
                         * boundary to 16 bytes and retry the loop;
                         * a second failure is fatal. */
                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                static struct pci_device_id dma_wait_state_chipsets[] = {
                        { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
                                     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
                        { },
                };

                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                }
                else
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
12159
12160 static void __devinit tg3_init_link_config(struct tg3 *tp)
12161 {
12162         tp->link_config.advertising =
12163                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12164                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12165                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12166                  ADVERTISED_Autoneg | ADVERTISED_MII);
12167         tp->link_config.speed = SPEED_INVALID;
12168         tp->link_config.duplex = DUPLEX_INVALID;
12169         tp->link_config.autoneg = AUTONEG_ENABLE;
12170         tp->link_config.active_speed = SPEED_INVALID;
12171         tp->link_config.active_duplex = DUPLEX_INVALID;
12172         tp->link_config.phy_is_low_power = 0;
12173         tp->link_config.orig_speed = SPEED_INVALID;
12174         tp->link_config.orig_duplex = DUPLEX_INVALID;
12175         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12176 }
12177
/* Fill tp->bufmgr_config with the default buffer-manager watermarks
 * (mbuf DMA/MAC-RX low water and high water, standard and jumbo) for
 * this chip family, plus the DMA descriptor low/high water marks.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                /* 5705 and newer use their own smaller defaults. */
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER_5705;
                /* 5906 further overrides the MAC-RX and high water marks. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tp->bufmgr_config.mbuf_mac_rx_low_water =
                                DEFAULT_MB_MACRX_LOW_WATER_5906;
                        tp->bufmgr_config.mbuf_high_water =
                                DEFAULT_MB_HIGH_WATER_5906;
                }

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO_5780;
        } else {
                /* Pre-5705 chips use the original defaults. */
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER;

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO;
        }

        tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
        tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
12219
12220 static char * __devinit tg3_phy_string(struct tg3 *tp)
12221 {
12222         switch (tp->phy_id & PHY_ID_MASK) {
12223         case PHY_ID_BCM5400:    return "5400";
12224         case PHY_ID_BCM5401:    return "5401";
12225         case PHY_ID_BCM5411:    return "5411";
12226         case PHY_ID_BCM5701:    return "5701";
12227         case PHY_ID_BCM5703:    return "5703";
12228         case PHY_ID_BCM5704:    return "5704";
12229         case PHY_ID_BCM5705:    return "5705";
12230         case PHY_ID_BCM5750:    return "5750";
12231         case PHY_ID_BCM5752:    return "5752";
12232         case PHY_ID_BCM5714:    return "5714";
12233         case PHY_ID_BCM5780:    return "5780";
12234         case PHY_ID_BCM5755:    return "5755";
12235         case PHY_ID_BCM5787:    return "5787";
12236         case PHY_ID_BCM5784:    return "5784";
12237         case PHY_ID_BCM5756:    return "5722/5756";
12238         case PHY_ID_BCM5906:    return "5906";
12239         case PHY_ID_BCM5761:    return "5761";
12240         case PHY_ID_BCM8002:    return "8002/serdes";
12241         case 0:                 return "serdes";
12242         default:                return "unknown";
12243         };
12244 }
12245
/* Format a description of the bus type, clock speed and width into the
 * caller-supplied buffer @str (e.g. "PCIX:133MHz:64-bit") and return
 * @str.  The caller must provide a buffer large enough for the longest
 * combination; no bound is checked here.
 */
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                strcpy(str, "PCI Express");
                return str;
        } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
                /* Low 5 bits of CLOCK_CTRL encode the PCI-X clock. */
                u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

                strcpy(str, "PCIX:");

                /* The 5704 CIOBE board runs at 133MHz regardless of
                 * the clock_ctrl encoding.  NOTE: the GRC_MISC_CFG
                 * read only happens when clock_ctrl != 7 (short
                 * circuit) — keep the condition order as-is. */
                if ((clock_ctrl == 7) ||
                    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
                     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
                        strcat(str, "133MHz");
                else if (clock_ctrl == 0)
                        strcat(str, "33MHz");
                else if (clock_ctrl == 2)
                        strcat(str, "50MHz");
                else if (clock_ctrl == 4)
                        strcat(str, "66MHz");
                else if (clock_ctrl == 6)
                        strcat(str, "100MHz");
        } else {
                strcpy(str, "PCI:");
                if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
                        strcat(str, "66MHz");
                else
                        strcat(str, "33MHz");
        }
        if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
                strcat(str, ":32-bit");
        else
                strcat(str, ":64-bit");
        return str;
}
12281
/* Find the other PCI function of a dual-port NIC sharing this slot.
 *
 * Scans all eight functions at this device's slot address for a
 * function other than tp->pdev.  Returns the peer, or tp->pdev itself
 * when no peer exists (single-port configuration).  The returned
 * pointer is NOT reference-counted — see the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                /* Drop the reference pci_get_slot() took; this is a
                 * no-op when peer is NULL. */
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        /* NOTE(review): if the loop exhausts without breaking, peer
         * still holds the func-7 result, which was already put and is
         * presumably NULL here in practice — confirm that tp->pdev can
         * never occupy func 7 with no other function present, else the
         * pci_dev_put() below would over-release it. */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        pci_dev_put(peer);

        return peer;
}
12309
12310 static void __devinit tg3_init_coal(struct tg3 *tp)
12311 {
12312         struct ethtool_coalesce *ec = &tp->coal;
12313
12314         memset(ec, 0, sizeof(*ec));
12315         ec->cmd = ETHTOOL_GCOALESCE;
12316         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12317         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12318         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12319         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12320         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12321         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12322         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12323         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12324         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12325
12326         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12327                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12328                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12329                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12330                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12331                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12332         }
12333
12334         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12335                 ec->rx_coalesce_usecs_irq = 0;
12336                 ec->tx_coalesce_usecs_irq = 0;
12337                 ec->stats_block_coalesce_usecs = 0;
12338         }
12339 }
12340
/* tg3_init_one() - PCI probe entry point.
 *
 * Enables the device, claims and maps BAR 0, allocates the net_device
 * with tg3 private state, probes chip invariants and capabilities
 * (DMA mask, TSO, checksum offload, optional APE block), resets any
 * DMA activity left running by boot firmware, runs the DMA engine
 * self-test, and finally registers the network interface.
 *
 * Returns 0 on success or a negative errno.  On failure, the goto
 * unwind chain at the bottom releases everything acquired so far.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        static int tg3_version_printed = 0;
        unsigned long tg3reg_base, tg3reg_len;
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        char str[40];
        u64 dma_mask, persist_dma_mask;

        /* Print the driver version banner only on the first probe. */
        if (tg3_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        err = pci_enable_device(pdev);
        if (err) {
                printk(KERN_ERR PFX "Cannot enable PCI device, "
                       "aborting.\n");
                return err;
        }

        /* BAR 0 must be a memory-mapped register window. */
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find proper PCI device "
                       "base address, aborting.\n");
                err = -ENODEV;
                goto err_out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                printk(KERN_ERR PFX "Cannot obtain PCI resources, "
                       "aborting.\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
                       "aborting.\n");
                err = -EIO;
                goto err_out_free_res;
        }

        tg3reg_base = pci_resource_start(pdev, 0);
        tg3reg_len = pci_resource_len(pdev, 0);

        /* Allocate the net_device with room for the tg3 private area. */
        dev = alloc_etherdev(sizeof(*tp));
        if (!dev) {
                printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_res;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

        /* Initialize software state to driver defaults. */
        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->mac_mode = TG3_DEF_MAC_MODE;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->mi_mode = MAC_MI_MODE_BASE;
        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
        if (!tp->regs) {
                printk(KERN_ERR PFX "Cannot map device registers, "
                       "aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        tg3_init_link_config(tp);

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
        tp->tx_pending = TG3_DEF_TX_RING_PENDING;

        /* Install net_device entry points and the NAPI poll handler. */
        dev->open = tg3_open;
        dev->stop = tg3_close;
        dev->get_stats = tg3_get_stats;
        dev->set_multicast_list = tg3_set_rx_mode;
        dev->set_mac_address = tg3_set_mac_addr;
        dev->do_ioctl = tg3_ioctl;
        dev->tx_timeout = tg3_tx_timeout;
        netif_napi_add(dev, &tp->napi, tg3_poll, 64);
        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->change_mtu = tg3_change_mtu;
        dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = tg3_poll_controller;
#endif

        /* Read chip revision / feature flags; fills tp->tg3_flags*. */
        err = tg3_get_invariants(tp);
        if (err) {
                printk(KERN_ERR PFX "Problem fetching invariants of chip, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
                persist_dma_mask = dma_mask = DMA_32BIT_MASK;
        else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
                persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_64BIT_MASK;
#endif
        } else
                persist_dma_mask = dma_mask = DMA_64BIT_MASK;

        /* Configure DMA attributes. */
        if (dma_mask > DMA_32BIT_MASK) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        dev->features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                printk(KERN_ERR PFX "Unable to obtain 64 bit "
                                       "DMA for consistent allocations\n");
                                goto err_out_iounmap;
                        }
                }
        }
        /* Fall back to a 32-bit mask if the wide mask was refused. */
        if (err || dma_mask == DMA_32BIT_MASK) {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        printk(KERN_ERR PFX "No usable DMA configuration, "
                               "aborting.\n");
                        goto err_out_iounmap;
                }
        }

        tg3_init_bufmgr_config(tp);

        /* Decide TSO capability: HW TSO chips are always capable; known
         * problem revisions (and ASF-enabled parts) get TSO disabled;
         * the rest use firmware TSO with the TSO_BUG workaround flag.
         */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
        }
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
        } else {
                tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                dev->features |= NETIF_F_TSO;
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
                    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
                        dev->features |= NETIF_F_TSO6;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        dev->features |= NETIF_F_TSO_ECN;
        }


        /* 5705_A1 on a slow bus without TSO: cap the RX ring at 64. */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        /* Chips with an APE (management processor) expose it via BAR 2;
         * map it and initialize the APE hardware locks.
         */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
                if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                        printk(KERN_ERR PFX "Cannot find proper PCI device "
                               "base address for APE, aborting.\n");
                        err = -ENODEV;
                        goto err_out_iounmap;
                }

                tg3reg_base = pci_resource_start(pdev, 2);
                tg3reg_len = pci_resource_len(pdev, 2);

                tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
                if (tp->aperegs == 0UL) {
                        printk(KERN_ERR PFX "Cannot map APE registers, "
                               "aborting.\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }

                tg3_ape_lock_init(tp);
        }

        /*
         * Reset chip in case UNDI or EFI driver did not shutdown
         * DMA self test will enable WDMAC and we'll see (spurious)
         * pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
                goto err_out_apeunmap;
        }

        /* Tigon3 can do ipv4 only... and some chips have buggy
         * checksumming.
         */
        if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        dev->features |= NETIF_F_IPV6_CSUM;

                tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
        } else
                tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

        /* flow control autonegotiation is default behavior */
        tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR PFX "Cannot register net device, "
                       "aborting.\n");
                goto err_out_apeunmap;
        }

        /* Summarize the probed configuration in the kernel log. */
        printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
               dev->name,
               tp->board_part_number,
               tp->pci_chip_rev_id,
               tg3_phy_string(tp),
               tg3_bus_string(tp, str),
               ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
                ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
                 "10/100/1000Base-T")));

        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? '\n' : ':');

        printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
               "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
               dev->name,
               (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
               (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
               (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
               (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
        printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
               dev->name, tp->dma_rwctrl,
               (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
                (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

        return 0;

/* Error unwind: each label releases what was acquired above it,
 * in reverse order of acquisition.
 */
err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
12682
12683 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12684 {
12685         struct net_device *dev = pci_get_drvdata(pdev);
12686
12687         if (dev) {
12688                 struct tg3 *tp = netdev_priv(dev);
12689
12690                 flush_scheduled_work();
12691                 unregister_netdev(dev);
12692                 if (tp->aperegs) {
12693                         iounmap(tp->aperegs);
12694                         tp->aperegs = NULL;
12695                 }
12696                 if (tp->regs) {
12697                         iounmap(tp->regs);
12698                         tp->regs = NULL;
12699                 }
12700                 free_netdev(dev);
12701                 pci_release_regions(pdev);
12702                 pci_disable_device(pdev);
12703                 pci_set_drvdata(pdev, NULL);
12704         }
12705 }
12706
/* tg3_suspend() - PCI suspend hook.
 *
 * Saves PCI config space unconditionally, then, if the interface is
 * running, quiesces the device (stop queues, kill the timer, disable
 * interrupts, halt the chip) and enters the requested low-power state.
 * If the power-state change fails, the hardware is restarted so the
 * interface keeps working; returns that error to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);

        if (!netif_running(dev))
                return 0;

        /* Let any queued reset_task finish before quiescing. */
        flush_scheduled_work();
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        /* Mark the device as not present before halting the hardware. */
        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
        tg3_full_unlock(tp);

        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
                /* Power transition failed: restart the hardware and
                 * reattach so the interface remains usable.
                 */
                tg3_full_lock(tp, 0);

                tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                if (tg3_restart_hw(tp, 1))
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);
        }

        return err;
}
12758
/* tg3_resume() - PCI resume hook.
 *
 * Restores PCI config space, and if the interface was running at
 * suspend time, returns the chip to full power (D0), restarts the
 * hardware, re-arms the driver timer, and restarts the queues.
 */
static int tg3_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        pci_restore_state(tp->pdev);

        if (!netif_running(dev))
                return 0;

        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        /* Re-arm the periodic driver timer halted by tg3_suspend(). */
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        return err;
}
12793
/* PCI driver glue: binds the device ID table to the probe/remove and
 * power-management entry points defined above.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume
};
12802
/* Module init: register the PCI driver; tg3_init_one() then runs for
 * each matching device.
 */
static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}
12807
/* Module exit: unregister the PCI driver, which triggers
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}
12812
/* Wire the init/exit functions to module load and unload. */
module_init(tg3_init);
module_exit(tg3_cleanup);