]> pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/net/tg3.c
Merge branch 'upstream-next-davem' of master.kernel.org:/pub/scm/linux/kernel/git...
[linux-2.6-omap-h63xx.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
58 #define TG3_VLAN_TAG_USED 1
59 #else
60 #define TG3_VLAN_TAG_USED 0
61 #endif
62
63 #define TG3_TSO_SUPPORT 1
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.93"
70 #define DRV_MODULE_RELDATE      "May 22, 2008"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
125
126 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
127 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
128
129 /* minimum number of free TX descriptors required to wake up TX process */
130 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
131
132 /* number of ETHTOOL_GSTATS u64's */
133 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
134
135 #define TG3_NUM_TEST            6
136
137 static char version[] __devinitdata =
138         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
139
140 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
141 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
142 MODULE_LICENSE("GPL");
143 MODULE_VERSION(DRV_MODULE_VERSION);
144
145 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
146 module_param(tg3_debug, int, 0);
147 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
148
149 static struct pci_device_id tg3_pci_tbl[] = {
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
209         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
210         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
212         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
213         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
214         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
215         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
216         {}
217 };
218
219 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
220
221 static const struct {
222         const char string[ETH_GSTRING_LEN];
223 } ethtool_stats_keys[TG3_NUM_STATS] = {
224         { "rx_octets" },
225         { "rx_fragments" },
226         { "rx_ucast_packets" },
227         { "rx_mcast_packets" },
228         { "rx_bcast_packets" },
229         { "rx_fcs_errors" },
230         { "rx_align_errors" },
231         { "rx_xon_pause_rcvd" },
232         { "rx_xoff_pause_rcvd" },
233         { "rx_mac_ctrl_rcvd" },
234         { "rx_xoff_entered" },
235         { "rx_frame_too_long_errors" },
236         { "rx_jabbers" },
237         { "rx_undersize_packets" },
238         { "rx_in_length_errors" },
239         { "rx_out_length_errors" },
240         { "rx_64_or_less_octet_packets" },
241         { "rx_65_to_127_octet_packets" },
242         { "rx_128_to_255_octet_packets" },
243         { "rx_256_to_511_octet_packets" },
244         { "rx_512_to_1023_octet_packets" },
245         { "rx_1024_to_1522_octet_packets" },
246         { "rx_1523_to_2047_octet_packets" },
247         { "rx_2048_to_4095_octet_packets" },
248         { "rx_4096_to_8191_octet_packets" },
249         { "rx_8192_to_9022_octet_packets" },
250
251         { "tx_octets" },
252         { "tx_collisions" },
253
254         { "tx_xon_sent" },
255         { "tx_xoff_sent" },
256         { "tx_flow_control" },
257         { "tx_mac_errors" },
258         { "tx_single_collisions" },
259         { "tx_mult_collisions" },
260         { "tx_deferred" },
261         { "tx_excessive_collisions" },
262         { "tx_late_collisions" },
263         { "tx_collide_2times" },
264         { "tx_collide_3times" },
265         { "tx_collide_4times" },
266         { "tx_collide_5times" },
267         { "tx_collide_6times" },
268         { "tx_collide_7times" },
269         { "tx_collide_8times" },
270         { "tx_collide_9times" },
271         { "tx_collide_10times" },
272         { "tx_collide_11times" },
273         { "tx_collide_12times" },
274         { "tx_collide_13times" },
275         { "tx_collide_14times" },
276         { "tx_collide_15times" },
277         { "tx_ucast_packets" },
278         { "tx_mcast_packets" },
279         { "tx_bcast_packets" },
280         { "tx_carrier_sense_errors" },
281         { "tx_discards" },
282         { "tx_errors" },
283
284         { "dma_writeq_full" },
285         { "dma_write_prioq_full" },
286         { "rxbds_empty" },
287         { "rx_discards" },
288         { "rx_errors" },
289         { "rx_threshold_hit" },
290
291         { "dma_readq_full" },
292         { "dma_read_prioq_full" },
293         { "tx_comp_queue_full" },
294
295         { "ring_set_send_prod_index" },
296         { "ring_status_update" },
297         { "nic_irqs" },
298         { "nic_avoided_irqs" },
299         { "nic_tx_threshold_hit" }
300 };
301
302 static const struct {
303         const char string[ETH_GSTRING_LEN];
304 } ethtool_test_keys[TG3_NUM_TEST] = {
305         { "nvram test     (online) " },
306         { "link test      (online) " },
307         { "register test  (offline)" },
308         { "memory test    (offline)" },
309         { "loopback test  (offline)" },
310         { "interrupt test (offline)" },
311 };
312
313 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
314 {
315         writel(val, tp->regs + off);
316 }
317
318 static u32 tg3_read32(struct tg3 *tp, u32 off)
319 {
320         return (readl(tp->regs + off));
321 }
322
323 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
324 {
325         writel(val, tp->aperegs + off);
326 }
327
328 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
329 {
330         return (readl(tp->aperegs + off));
331 }
332
333 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
334 {
335         unsigned long flags;
336
337         spin_lock_irqsave(&tp->indirect_lock, flags);
338         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
339         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
340         spin_unlock_irqrestore(&tp->indirect_lock, flags);
341 }
342
343 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
344 {
345         writel(val, tp->regs + off);
346         readl(tp->regs + off);
347 }
348
349 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
350 {
351         unsigned long flags;
352         u32 val;
353
354         spin_lock_irqsave(&tp->indirect_lock, flags);
355         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
356         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
357         spin_unlock_irqrestore(&tp->indirect_lock, flags);
358         return val;
359 }
360
361 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
362 {
363         unsigned long flags;
364
365         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
366                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
367                                        TG3_64BIT_REG_LOW, val);
368                 return;
369         }
370         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
371                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
372                                        TG3_64BIT_REG_LOW, val);
373                 return;
374         }
375
376         spin_lock_irqsave(&tp->indirect_lock, flags);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
378         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
379         spin_unlock_irqrestore(&tp->indirect_lock, flags);
380
381         /* In indirect mode when disabling interrupts, we also need
382          * to clear the interrupt bit in the GRC local ctrl register.
383          */
384         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
385             (val == 0x1)) {
386                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
387                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
388         }
389 }
390
391 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
392 {
393         unsigned long flags;
394         u32 val;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
398         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
399         spin_unlock_irqrestore(&tp->indirect_lock, flags);
400         return val;
401 }
402
403 /* usec_wait specifies the wait time in usec when writing to certain registers
404  * where it is unsafe to read back the register without some delay.
405  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
406  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
407  */
408 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
409 {
410         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
411             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
412                 /* Non-posted methods */
413                 tp->write32(tp, off, val);
414         else {
415                 /* Posted method */
416                 tg3_write32(tp, off, val);
417                 if (usec_wait)
418                         udelay(usec_wait);
419                 tp->read32(tp, off);
420         }
421         /* Wait again after the read for the posted method to guarantee that
422          * the wait time is met.
423          */
424         if (usec_wait)
425                 udelay(usec_wait);
426 }
427
428 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
429 {
430         tp->write32_mbox(tp, off, val);
431         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
432             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
433                 tp->read32_mbox(tp, off);
434 }
435
436 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
437 {
438         void __iomem *mbox = tp->regs + off;
439         writel(val, mbox);
440         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
441                 writel(val, mbox);
442         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
443                 readl(mbox);
444 }
445
446 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
447 {
448         return (readl(tp->regs + off + GRCMBOX_BASE));
449 }
450
451 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
452 {
453         writel(val, tp->regs + off + GRCMBOX_BASE);
454 }
455
456 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
457 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
458 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
459 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
460 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
461
462 #define tw32(reg,val)           tp->write32(tp, reg, val)
463 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
464 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
465 #define tr32(reg)               tp->read32(tp, reg)
466
467 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
468 {
469         unsigned long flags;
470
471         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
472             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
473                 return;
474
475         spin_lock_irqsave(&tp->indirect_lock, flags);
476         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
477                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
478                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
479
480                 /* Always leave this as zero. */
481                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
482         } else {
483                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
484                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
485
486                 /* Always leave this as zero. */
487                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
488         }
489         spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 }
491
492 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
493 {
494         unsigned long flags;
495
496         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
497             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
498                 *val = 0;
499                 return;
500         }
501
502         spin_lock_irqsave(&tp->indirect_lock, flags);
503         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
504                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
505                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
506
507                 /* Always leave this as zero. */
508                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
509         } else {
510                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
511                 *val = tr32(TG3PCI_MEM_WIN_DATA);
512
513                 /* Always leave this as zero. */
514                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
515         }
516         spin_unlock_irqrestore(&tp->indirect_lock, flags);
517 }
518
519 static void tg3_ape_lock_init(struct tg3 *tp)
520 {
521         int i;
522
523         /* Make sure the driver hasn't any stale locks. */
524         for (i = 0; i < 8; i++)
525                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
526                                 APE_LOCK_GRANT_DRIVER);
527 }
528
529 static int tg3_ape_lock(struct tg3 *tp, int locknum)
530 {
531         int i, off;
532         int ret = 0;
533         u32 status;
534
535         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
536                 return 0;
537
538         switch (locknum) {
539                 case TG3_APE_LOCK_MEM:
540                         break;
541                 default:
542                         return -EINVAL;
543         }
544
545         off = 4 * locknum;
546
547         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
548
549         /* Wait for up to 1 millisecond to acquire lock. */
550         for (i = 0; i < 100; i++) {
551                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
552                 if (status == APE_LOCK_GRANT_DRIVER)
553                         break;
554                 udelay(10);
555         }
556
557         if (status != APE_LOCK_GRANT_DRIVER) {
558                 /* Revoke the lock request. */
559                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
560                                 APE_LOCK_GRANT_DRIVER);
561
562                 ret = -EBUSY;
563         }
564
565         return ret;
566 }
567
568 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
569 {
570         int off;
571
572         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
573                 return;
574
575         switch (locknum) {
576                 case TG3_APE_LOCK_MEM:
577                         break;
578                 default:
579                         return;
580         }
581
582         off = 4 * locknum;
583         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
584 }
585
586 static void tg3_disable_ints(struct tg3 *tp)
587 {
588         tw32(TG3PCI_MISC_HOST_CTRL,
589              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
590         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
591 }
592
593 static inline void tg3_cond_int(struct tg3 *tp)
594 {
595         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
596             (tp->hw_status->status & SD_STATUS_UPDATED))
597                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
598         else
599                 tw32(HOSTCC_MODE, tp->coalesce_mode |
600                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
601 }
602
603 static void tg3_enable_ints(struct tg3 *tp)
604 {
605         tp->irq_sync = 0;
606         wmb();
607
608         tw32(TG3PCI_MISC_HOST_CTRL,
609              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
610         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
611                        (tp->last_tag << 24));
612         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
613                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
614                                (tp->last_tag << 24));
615         tg3_cond_int(tp);
616 }
617
618 static inline unsigned int tg3_has_work(struct tg3 *tp)
619 {
620         struct tg3_hw_status *sblk = tp->hw_status;
621         unsigned int work_exists = 0;
622
623         /* check for phy events */
624         if (!(tp->tg3_flags &
625               (TG3_FLAG_USE_LINKCHG_REG |
626                TG3_FLAG_POLL_SERDES))) {
627                 if (sblk->status & SD_STATUS_LINK_CHG)
628                         work_exists = 1;
629         }
630         /* check for RX/TX work to do */
631         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
632             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
633                 work_exists = 1;
634
635         return work_exists;
636 }
637
638 /* tg3_restart_ints
639  *  similar to tg3_enable_ints, but it accurately determines whether there
640  *  is new work pending and can return without flushing the PIO write
641  *  which reenables interrupts
642  */
643 static void tg3_restart_ints(struct tg3 *tp)
644 {
645         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
646                      tp->last_tag << 24);
647         mmiowb();
648
649         /* When doing tagged status, this work check is unnecessary.
650          * The last_tag we write above tells the chip which piece of
651          * work we've completed.
652          */
653         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
654             tg3_has_work(tp))
655                 tw32(HOSTCC_MODE, tp->coalesce_mode |
656                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
657 }
658
659 static inline void tg3_netif_stop(struct tg3 *tp)
660 {
661         tp->dev->trans_start = jiffies; /* prevent tx timeout */
662         napi_disable(&tp->napi);
663         netif_tx_disable(tp->dev);
664 }
665
666 static inline void tg3_netif_start(struct tg3 *tp)
667 {
668         netif_wake_queue(tp->dev);
669         /* NOTE: unconditional netif_wake_queue is only appropriate
670          * so long as all callers are assured to have free tx slots
671          * (such as after tg3_init_hw)
672          */
673         napi_enable(&tp->napi);
674         tp->hw_status->status |= SD_STATUS_UPDATED;
675         tg3_enable_ints(tp);
676 }
677
678 static void tg3_switch_clocks(struct tg3 *tp)
679 {
680         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
681         u32 orig_clock_ctrl;
682
683         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
684             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
685                 return;
686
687         orig_clock_ctrl = clock_ctrl;
688         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
689                        CLOCK_CTRL_CLKRUN_OENABLE |
690                        0x1f);
691         tp->pci_clock_ctrl = clock_ctrl;
692
693         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
694                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
695                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
696                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
697                 }
698         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
699                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
700                             clock_ctrl |
701                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
702                             40);
703                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
704                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
705                             40);
706         }
707         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
708 }
709
710 #define PHY_BUSY_LOOPS  5000
711
712 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
713 {
714         u32 frame_val;
715         unsigned int loops;
716         int ret;
717
718         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
719                 tw32_f(MAC_MI_MODE,
720                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
721                 udelay(80);
722         }
723
724         *val = 0x0;
725
726         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
727                       MI_COM_PHY_ADDR_MASK);
728         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
729                       MI_COM_REG_ADDR_MASK);
730         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
731
732         tw32_f(MAC_MI_COM, frame_val);
733
734         loops = PHY_BUSY_LOOPS;
735         while (loops != 0) {
736                 udelay(10);
737                 frame_val = tr32(MAC_MI_COM);
738
739                 if ((frame_val & MI_COM_BUSY) == 0) {
740                         udelay(5);
741                         frame_val = tr32(MAC_MI_COM);
742                         break;
743                 }
744                 loops -= 1;
745         }
746
747         ret = -EBUSY;
748         if (loops != 0) {
749                 *val = frame_val & MI_COM_DATA_MASK;
750                 ret = 0;
751         }
752
753         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
754                 tw32_f(MAC_MI_MODE, tp->mi_mode);
755                 udelay(80);
756         }
757
758         return ret;
759 }
760
761 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
762 {
763         u32 frame_val;
764         unsigned int loops;
765         int ret;
766
767         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
768             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
769                 return 0;
770
771         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
772                 tw32_f(MAC_MI_MODE,
773                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
774                 udelay(80);
775         }
776
777         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
778                       MI_COM_PHY_ADDR_MASK);
779         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
780                       MI_COM_REG_ADDR_MASK);
781         frame_val |= (val & MI_COM_DATA_MASK);
782         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
783
784         tw32_f(MAC_MI_COM, frame_val);
785
786         loops = PHY_BUSY_LOOPS;
787         while (loops != 0) {
788                 udelay(10);
789                 frame_val = tr32(MAC_MI_COM);
790                 if ((frame_val & MI_COM_BUSY) == 0) {
791                         udelay(5);
792                         frame_val = tr32(MAC_MI_COM);
793                         break;
794                 }
795                 loops -= 1;
796         }
797
798         ret = -EBUSY;
799         if (loops != 0)
800                 ret = 0;
801
802         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
803                 tw32_f(MAC_MI_MODE, tp->mi_mode);
804                 udelay(80);
805         }
806
807         return ret;
808 }
809
810 static int tg3_bmcr_reset(struct tg3 *tp)
811 {
812         u32 phy_control;
813         int limit, err;
814
815         /* OK, reset it, and poll the BMCR_RESET bit until it
816          * clears or we time out.
817          */
818         phy_control = BMCR_RESET;
819         err = tg3_writephy(tp, MII_BMCR, phy_control);
820         if (err != 0)
821                 return -EBUSY;
822
823         limit = 5000;
824         while (limit--) {
825                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
826                 if (err != 0)
827                         return -EBUSY;
828
829                 if ((phy_control & BMCR_RESET) == 0) {
830                         udelay(40);
831                         break;
832                 }
833                 udelay(10);
834         }
835         if (limit <= 0)
836                 return -EBUSY;
837
838         return 0;
839 }
840
841 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
842 {
843         struct tg3 *tp = (struct tg3 *)bp->priv;
844         u32 val;
845
846         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
847                 return -EAGAIN;
848
849         if (tg3_readphy(tp, reg, &val))
850                 return -EIO;
851
852         return val;
853 }
854
855 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
856 {
857         struct tg3 *tp = (struct tg3 *)bp->priv;
858
859         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
860                 return -EAGAIN;
861
862         if (tg3_writephy(tp, reg, val))
863                 return -EIO;
864
865         return 0;
866 }
867
868 static int tg3_mdio_reset(struct mii_bus *bp)
869 {
870         return 0;
871 }
872
873 static void tg3_mdio_config(struct tg3 *tp)
874 {
875         u32 val;
876
877         if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
878             PHY_INTERFACE_MODE_RGMII)
879                 return;
880
881         val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
882                                     MAC_PHYCFG1_RGMII_SND_STAT_EN);
883         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
884                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
885                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
886                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
887                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
888         }
889         tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
890
891         val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
892         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
893                 val |= MAC_PHYCFG2_INBAND_ENABLE;
894         tw32(MAC_PHYCFG2, val);
895
896         val = tr32(MAC_EXT_RGMII_MODE);
897         val &= ~(MAC_RGMII_MODE_RX_INT_B |
898                  MAC_RGMII_MODE_RX_QUALITY |
899                  MAC_RGMII_MODE_RX_ACTIVITY |
900                  MAC_RGMII_MODE_RX_ENG_DET |
901                  MAC_RGMII_MODE_TX_ENABLE |
902                  MAC_RGMII_MODE_TX_LOWPWR |
903                  MAC_RGMII_MODE_TX_RESET);
904         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
905                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
906                         val |= MAC_RGMII_MODE_RX_INT_B |
907                                MAC_RGMII_MODE_RX_QUALITY |
908                                MAC_RGMII_MODE_RX_ACTIVITY |
909                                MAC_RGMII_MODE_RX_ENG_DET;
910                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
911                         val |= MAC_RGMII_MODE_TX_ENABLE |
912                                MAC_RGMII_MODE_TX_LOWPWR |
913                                MAC_RGMII_MODE_TX_RESET;
914         }
915         tw32(MAC_EXT_RGMII_MODE, val);
916 }
917
918 static void tg3_mdio_start(struct tg3 *tp)
919 {
920         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
921                 mutex_lock(&tp->mdio_bus.mdio_lock);
922                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
923                 mutex_unlock(&tp->mdio_bus.mdio_lock);
924         }
925
926         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
927         tw32_f(MAC_MI_MODE, tp->mi_mode);
928         udelay(80);
929
930         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
931                 tg3_mdio_config(tp);
932 }
933
934 static void tg3_mdio_stop(struct tg3 *tp)
935 {
936         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
937                 mutex_lock(&tp->mdio_bus.mdio_lock);
938                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
939                 mutex_unlock(&tp->mdio_bus.mdio_lock);
940         }
941 }
942
943 static int tg3_mdio_init(struct tg3 *tp)
944 {
945         int i;
946         u32 reg;
947         struct phy_device *phydev;
948         struct mii_bus *mdio_bus = &tp->mdio_bus;
949
950         tg3_mdio_start(tp);
951
952         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
953             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
954                 return 0;
955
956         memset(mdio_bus, 0, sizeof(*mdio_bus));
957
958         mdio_bus->name     = "tg3 mdio bus";
959         snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
960                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
961         mdio_bus->priv     = tp;
962         mdio_bus->dev      = &tp->pdev->dev;
963         mdio_bus->read     = &tg3_mdio_read;
964         mdio_bus->write    = &tg3_mdio_write;
965         mdio_bus->reset    = &tg3_mdio_reset;
966         mdio_bus->phy_mask = ~(1 << PHY_ADDR);
967         mdio_bus->irq      = &tp->mdio_irq[0];
968
969         for (i = 0; i < PHY_MAX_ADDR; i++)
970                 mdio_bus->irq[i] = PHY_POLL;
971
972         /* The bus registration will look for all the PHYs on the mdio bus.
973          * Unfortunately, it does not ensure the PHY is powered up before
974          * accessing the PHY ID registers.  A chip reset is the
975          * quickest way to bring the device back to an operational state..
976          */
977         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
978                 tg3_bmcr_reset(tp);
979
980         i = mdiobus_register(mdio_bus);
981         if (i) {
982                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
983                         tp->dev->name, i);
984                 return i;
985         }
986
987         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
988
989         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
990
991         switch (phydev->phy_id) {
992         case TG3_PHY_ID_BCM50610:
993                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
994                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
995                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
996                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
997                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
998                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
999                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1000                 break;
1001         case TG3_PHY_ID_BCMAC131:
1002                 phydev->interface = PHY_INTERFACE_MODE_MII;
1003                 break;
1004         }
1005
1006         tg3_mdio_config(tp);
1007
1008         return 0;
1009 }
1010
1011 static void tg3_mdio_fini(struct tg3 *tp)
1012 {
1013         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1014                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1015                 mdiobus_unregister(&tp->mdio_bus);
1016                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1017         }
1018 }
1019
1020 /* tp->lock is held. */
1021 static void tg3_wait_for_event_ack(struct tg3 *tp)
1022 {
1023         int i;
1024
1025         /* Wait for up to 2.5 milliseconds */
1026         for (i = 0; i < 250000; i++) {
1027                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1028                         break;
1029                 udelay(10);
1030         }
1031 }
1032
1033 /* tp->lock is held. */
1034 static void tg3_ump_link_report(struct tg3 *tp)
1035 {
1036         u32 reg;
1037         u32 val;
1038
1039         if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1040             !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
1041                 return;
1042
1043         tg3_wait_for_event_ack(tp);
1044
1045         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1046
1047         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1048
1049         val = 0;
1050         if (!tg3_readphy(tp, MII_BMCR, &reg))
1051                 val = reg << 16;
1052         if (!tg3_readphy(tp, MII_BMSR, &reg))
1053                 val |= (reg & 0xffff);
1054         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1055
1056         val = 0;
1057         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1058                 val = reg << 16;
1059         if (!tg3_readphy(tp, MII_LPA, &reg))
1060                 val |= (reg & 0xffff);
1061         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1062
1063         val = 0;
1064         if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1065                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1066                         val = reg << 16;
1067                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1068                         val |= (reg & 0xffff);
1069         }
1070         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1071
1072         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1073                 val = reg << 16;
1074         else
1075                 val = 0;
1076         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1077
1078         val = tr32(GRC_RX_CPU_EVENT);
1079         val |= GRC_RX_CPU_DRIVER_EVENT;
1080         tw32_f(GRC_RX_CPU_EVENT, val);
1081 }
1082
1083 static void tg3_link_report(struct tg3 *tp)
1084 {
1085         if (!netif_carrier_ok(tp->dev)) {
1086                 if (netif_msg_link(tp))
1087                         printk(KERN_INFO PFX "%s: Link is down.\n",
1088                                tp->dev->name);
1089                 tg3_ump_link_report(tp);
1090         } else if (netif_msg_link(tp)) {
1091                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1092                        tp->dev->name,
1093                        (tp->link_config.active_speed == SPEED_1000 ?
1094                         1000 :
1095                         (tp->link_config.active_speed == SPEED_100 ?
1096                          100 : 10)),
1097                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1098                         "full" : "half"));
1099
1100                 printk(KERN_INFO PFX
1101                        "%s: Flow control is %s for TX and %s for RX.\n",
1102                        tp->dev->name,
1103                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1104                        "on" : "off",
1105                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1106                        "on" : "off");
1107                 tg3_ump_link_report(tp);
1108         }
1109 }
1110
1111 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1112 {
1113         u16 miireg;
1114
1115         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1116                 miireg = ADVERTISE_PAUSE_CAP;
1117         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1118                 miireg = ADVERTISE_PAUSE_ASYM;
1119         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1120                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1121         else
1122                 miireg = 0;
1123
1124         return miireg;
1125 }
1126
1127 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1128 {
1129         u16 miireg;
1130
1131         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1132                 miireg = ADVERTISE_1000XPAUSE;
1133         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1134                 miireg = ADVERTISE_1000XPSE_ASYM;
1135         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1136                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1137         else
1138                 miireg = 0;
1139
1140         return miireg;
1141 }
1142
1143 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1144 {
1145         u8 cap = 0;
1146
1147         if (lcladv & ADVERTISE_PAUSE_CAP) {
1148                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1149                         if (rmtadv & LPA_PAUSE_CAP)
1150                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1151                         else if (rmtadv & LPA_PAUSE_ASYM)
1152                                 cap = TG3_FLOW_CTRL_RX;
1153                 } else {
1154                         if (rmtadv & LPA_PAUSE_CAP)
1155                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1156                 }
1157         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1158                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1159                         cap = TG3_FLOW_CTRL_TX;
1160         }
1161
1162         return cap;
1163 }
1164
1165 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1166 {
1167         u8 cap = 0;
1168
1169         if (lcladv & ADVERTISE_1000XPAUSE) {
1170                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1171                         if (rmtadv & LPA_1000XPAUSE)
1172                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1173                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1174                                 cap = TG3_FLOW_CTRL_RX;
1175                 } else {
1176                         if (rmtadv & LPA_1000XPAUSE)
1177                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1178                 }
1179         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1180                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1181                         cap = TG3_FLOW_CTRL_TX;
1182         }
1183
1184         return cap;
1185 }
1186
1187 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1188 {
1189         u8 autoneg;
1190         u8 flowctrl = 0;
1191         u32 old_rx_mode = tp->rx_mode;
1192         u32 old_tx_mode = tp->tx_mode;
1193
1194         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1195                 autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
1196         else
1197                 autoneg = tp->link_config.autoneg;
1198
1199         if (autoneg == AUTONEG_ENABLE &&
1200             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1201                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1202                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1203                 else
1204                         flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1205         } else
1206                 flowctrl = tp->link_config.flowctrl;
1207
1208         tp->link_config.active_flowctrl = flowctrl;
1209
1210         if (flowctrl & TG3_FLOW_CTRL_RX)
1211                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1212         else
1213                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1214
1215         if (old_rx_mode != tp->rx_mode)
1216                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1217
1218         if (flowctrl & TG3_FLOW_CTRL_TX)
1219                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1220         else
1221                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1222
1223         if (old_tx_mode != tp->tx_mode)
1224                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1225 }
1226
1227 static void tg3_adjust_link(struct net_device *dev)
1228 {
1229         u8 oldflowctrl, linkmesg = 0;
1230         u32 mac_mode, lcl_adv, rmt_adv;
1231         struct tg3 *tp = netdev_priv(dev);
1232         struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1233
1234         spin_lock(&tp->lock);
1235
1236         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1237                                     MAC_MODE_HALF_DUPLEX);
1238
1239         oldflowctrl = tp->link_config.active_flowctrl;
1240
1241         if (phydev->link) {
1242                 lcl_adv = 0;
1243                 rmt_adv = 0;
1244
1245                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1246                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1247                 else
1248                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1249
1250                 if (phydev->duplex == DUPLEX_HALF)
1251                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1252                 else {
1253                         lcl_adv = tg3_advert_flowctrl_1000T(
1254                                   tp->link_config.flowctrl);
1255
1256                         if (phydev->pause)
1257                                 rmt_adv = LPA_PAUSE_CAP;
1258                         if (phydev->asym_pause)
1259                                 rmt_adv |= LPA_PAUSE_ASYM;
1260                 }
1261
1262                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1263         } else
1264                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1265
1266         if (mac_mode != tp->mac_mode) {
1267                 tp->mac_mode = mac_mode;
1268                 tw32_f(MAC_MODE, tp->mac_mode);
1269                 udelay(40);
1270         }
1271
1272         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1273                 tw32(MAC_TX_LENGTHS,
1274                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1275                       (6 << TX_LENGTHS_IPG_SHIFT) |
1276                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1277         else
1278                 tw32(MAC_TX_LENGTHS,
1279                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1280                       (6 << TX_LENGTHS_IPG_SHIFT) |
1281                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1282
1283         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1284             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1285             phydev->speed != tp->link_config.active_speed ||
1286             phydev->duplex != tp->link_config.active_duplex ||
1287             oldflowctrl != tp->link_config.active_flowctrl)
1288             linkmesg = 1;
1289
1290         tp->link_config.active_speed = phydev->speed;
1291         tp->link_config.active_duplex = phydev->duplex;
1292
1293         spin_unlock(&tp->lock);
1294
1295         if (linkmesg)
1296                 tg3_link_report(tp);
1297 }
1298
1299 static int tg3_phy_init(struct tg3 *tp)
1300 {
1301         struct phy_device *phydev;
1302
1303         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1304                 return 0;
1305
1306         /* Bring the PHY back to a known state. */
1307         tg3_bmcr_reset(tp);
1308
1309         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1310
1311         /* Attach the MAC to the PHY. */
1312         phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1313                              phydev->dev_flags, phydev->interface);
1314         if (IS_ERR(phydev)) {
1315                 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1316                 return PTR_ERR(phydev);
1317         }
1318
1319         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1320
1321         /* Mask with MAC supported features. */
1322         phydev->supported &= (PHY_GBIT_FEATURES |
1323                               SUPPORTED_Pause |
1324                               SUPPORTED_Asym_Pause);
1325
1326         phydev->advertising = phydev->supported;
1327
1328         printk(KERN_INFO
1329                "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
1330                tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
1331
1332         return 0;
1333 }
1334
1335 static void tg3_phy_start(struct tg3 *tp)
1336 {
1337         struct phy_device *phydev;
1338
1339         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1340                 return;
1341
1342         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1343
1344         if (tp->link_config.phy_is_low_power) {
1345                 tp->link_config.phy_is_low_power = 0;
1346                 phydev->speed = tp->link_config.orig_speed;
1347                 phydev->duplex = tp->link_config.orig_duplex;
1348                 phydev->autoneg = tp->link_config.orig_autoneg;
1349                 phydev->advertising = tp->link_config.orig_advertising;
1350         }
1351
1352         phy_start(phydev);
1353
1354         phy_start_aneg(phydev);
1355 }
1356
1357 static void tg3_phy_stop(struct tg3 *tp)
1358 {
1359         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1360                 return;
1361
1362         phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
1363 }
1364
1365 static void tg3_phy_fini(struct tg3 *tp)
1366 {
1367         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1368                 phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
1369                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1370         }
1371 }
1372
1373 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1374 {
1375         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1376         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1377 }
1378
1379 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1380 {
1381         u32 phy;
1382
1383         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1384             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1385                 return;
1386
1387         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1388                 u32 ephy;
1389
1390                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1391                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1392                                      ephy | MII_TG3_EPHY_SHADOW_EN);
1393                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1394                                 if (enable)
1395                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1396                                 else
1397                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1398                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1399                         }
1400                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1401                 }
1402         } else {
1403                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1404                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1405                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1406                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1407                         if (enable)
1408                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1409                         else
1410                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1411                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1412                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1413                 }
1414         }
1415 }
1416
1417 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1418 {
1419         u32 val;
1420
1421         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1422                 return;
1423
1424         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1425             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1426                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1427                              (val | (1 << 15) | (1 << 4)));
1428 }
1429
1430 static void tg3_phy_apply_otp(struct tg3 *tp)
1431 {
1432         u32 otp, phy;
1433
1434         if (!tp->phy_otp)
1435                 return;
1436
1437         otp = tp->phy_otp;
1438
1439         /* Enable SM_DSP clock and tx 6dB coding. */
1440         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1441               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1442               MII_TG3_AUXCTL_ACTL_TX_6DB;
1443         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1444
1445         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1446         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1447         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1448
1449         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1450               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1451         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1452
1453         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1454         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1455         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1456
1457         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1458         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1459
1460         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1461         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1462
1463         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1464               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1465         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1466
1467         /* Turn off SM_DSP clock. */
1468         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1469               MII_TG3_AUXCTL_ACTL_TX_6DB;
1470         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1471 }
1472
1473 static int tg3_wait_macro_done(struct tg3 *tp)
1474 {
1475         int limit = 100;
1476
1477         while (limit--) {
1478                 u32 tmp32;
1479
1480                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1481                         if ((tmp32 & 0x1000) == 0)
1482                                 break;
1483                 }
1484         }
1485         if (limit <= 0)
1486                 return -EBUSY;
1487
1488         return 0;
1489 }
1490
1491 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1492 {
1493         static const u32 test_pat[4][6] = {
1494         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1495         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1496         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1497         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1498         };
1499         int chan;
1500
1501         for (chan = 0; chan < 4; chan++) {
1502                 int i;
1503
1504                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1505                              (chan * 0x2000) | 0x0200);
1506                 tg3_writephy(tp, 0x16, 0x0002);
1507
1508                 for (i = 0; i < 6; i++)
1509                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1510                                      test_pat[chan][i]);
1511
1512                 tg3_writephy(tp, 0x16, 0x0202);
1513                 if (tg3_wait_macro_done(tp)) {
1514                         *resetp = 1;
1515                         return -EBUSY;
1516                 }
1517
1518                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1519                              (chan * 0x2000) | 0x0200);
1520                 tg3_writephy(tp, 0x16, 0x0082);
1521                 if (tg3_wait_macro_done(tp)) {
1522                         *resetp = 1;
1523                         return -EBUSY;
1524                 }
1525
1526                 tg3_writephy(tp, 0x16, 0x0802);
1527                 if (tg3_wait_macro_done(tp)) {
1528                         *resetp = 1;
1529                         return -EBUSY;
1530                 }
1531
1532                 for (i = 0; i < 6; i += 2) {
1533                         u32 low, high;
1534
1535                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1536                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1537                             tg3_wait_macro_done(tp)) {
1538                                 *resetp = 1;
1539                                 return -EBUSY;
1540                         }
1541                         low &= 0x7fff;
1542                         high &= 0x000f;
1543                         if (low != test_pat[chan][i] ||
1544                             high != test_pat[chan][i+1]) {
1545                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1546                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1547                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1548
1549                                 return -EBUSY;
1550                         }
1551                 }
1552         }
1553
1554         return 0;
1555 }
1556
1557 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1558 {
1559         int chan;
1560
1561         for (chan = 0; chan < 4; chan++) {
1562                 int i;
1563
1564                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1565                              (chan * 0x2000) | 0x0200);
1566                 tg3_writephy(tp, 0x16, 0x0002);
1567                 for (i = 0; i < 6; i++)
1568                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1569                 tg3_writephy(tp, 0x16, 0x0202);
1570                 if (tg3_wait_macro_done(tp))
1571                         return -EBUSY;
1572         }
1573
1574         return 0;
1575 }
1576
1577 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1578 {
1579         u32 reg32, phy9_orig;
1580         int retries, do_phy_reset, err;
1581
1582         retries = 10;
1583         do_phy_reset = 1;
1584         do {
1585                 if (do_phy_reset) {
1586                         err = tg3_bmcr_reset(tp);
1587                         if (err)
1588                                 return err;
1589                         do_phy_reset = 0;
1590                 }
1591
1592                 /* Disable transmitter and interrupt.  */
1593                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1594                         continue;
1595
1596                 reg32 |= 0x3000;
1597                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1598
1599                 /* Set full-duplex, 1000 mbps.  */
1600                 tg3_writephy(tp, MII_BMCR,
1601                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1602
1603                 /* Set to master mode.  */
1604                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1605                         continue;
1606
1607                 tg3_writephy(tp, MII_TG3_CTRL,
1608                              (MII_TG3_CTRL_AS_MASTER |
1609                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1610
1611                 /* Enable SM_DSP_CLOCK and 6dB.  */
1612                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1613
1614                 /* Block the PHY control access.  */
1615                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1616                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1617
1618                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1619                 if (!err)
1620                         break;
1621         } while (--retries);
1622
1623         err = tg3_phy_reset_chanpat(tp);
1624         if (err)
1625                 return err;
1626
1627         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1628         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1629
1630         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1631         tg3_writephy(tp, 0x16, 0x0000);
1632
1633         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1634             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1635                 /* Set Extended packet length bit for jumbo frames */
1636                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1637         }
1638         else {
1639                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1640         }
1641
1642         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1643
1644         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1645                 reg32 &= ~0x3000;
1646                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1647         } else if (!err)
1648                 err = -EBUSY;
1649
1650         return err;
1651 }
1652
1653 /* This will reset the tigon3 PHY if there is no valid
1654  * link unless the FORCE argument is non-zero.
1655  */
1656 static int tg3_phy_reset(struct tg3 *tp)
1657 {
1658         u32 cpmuctrl;
1659         u32 phy_status;
1660         int err;
1661
1662         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1663                 u32 val;
1664
1665                 val = tr32(GRC_MISC_CFG);
1666                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1667                 udelay(40);
1668         }
1669         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1670         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1671         if (err != 0)
1672                 return -EBUSY;
1673
1674         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1675                 netif_carrier_off(tp->dev);
1676                 tg3_link_report(tp);
1677         }
1678
1679         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1682                 err = tg3_phy_reset_5703_4_5(tp);
1683                 if (err)
1684                         return err;
1685                 goto out;
1686         }
1687
1688         cpmuctrl = 0;
1689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1690             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1691                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1692                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1693                         tw32(TG3_CPMU_CTRL,
1694                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1695         }
1696
1697         err = tg3_bmcr_reset(tp);
1698         if (err)
1699                 return err;
1700
1701         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1702                 u32 phy;
1703
1704                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1705                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1706
1707                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1708         }
1709
1710         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1711                 u32 val;
1712
1713                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1714                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1715                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1716                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1717                         udelay(40);
1718                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1719                 }
1720
1721                 /* Disable GPHY autopowerdown. */
1722                 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1723                              MII_TG3_MISC_SHDW_WREN |
1724                              MII_TG3_MISC_SHDW_APD_SEL |
1725                              MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1726         }
1727
1728         tg3_phy_apply_otp(tp);
1729
1730 out:
1731         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1732                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1733                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1734                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1735                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1736                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1737                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1738         }
1739         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1740                 tg3_writephy(tp, 0x1c, 0x8d68);
1741                 tg3_writephy(tp, 0x1c, 0x8d68);
1742         }
1743         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1744                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1745                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1746                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1747                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1748                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1749                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1750                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1751                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1752         }
1753         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1754                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1755                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1756                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1757                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1758                         tg3_writephy(tp, MII_TG3_TEST1,
1759                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1760                 } else
1761                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1762                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1763         }
1764         /* Set Extended packet length bit (bit 14) on all chips that */
1765         /* support jumbo frames */
1766         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1767                 /* Cannot do read-modify-write on 5401 */
1768                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1769         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1770                 u32 phy_reg;
1771
1772                 /* Set bit 14 with read-modify-write to preserve other bits */
1773                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1774                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1775                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1776         }
1777
1778         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1779          * jumbo frames transmission.
1780          */
1781         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1782                 u32 phy_reg;
1783
1784                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1785                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1786                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1787         }
1788
1789         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1790                 /* adjust output voltage */
1791                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1792         }
1793
1794         tg3_phy_toggle_automdix(tp, 1);
1795         tg3_phy_set_wirespeed(tp);
1796         return 0;
1797 }
1798
1799 static void tg3_frob_aux_power(struct tg3 *tp)
1800 {
1801         struct tg3 *tp_peer = tp;
1802
1803         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1804                 return;
1805
1806         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1807             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1808                 struct net_device *dev_peer;
1809
1810                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1811                 /* remove_one() may have been run on the peer. */
1812                 if (!dev_peer)
1813                         tp_peer = tp;
1814                 else
1815                         tp_peer = netdev_priv(dev_peer);
1816         }
1817
1818         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1819             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1820             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1821             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1822                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1823                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1824                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1825                                     (GRC_LCLCTRL_GPIO_OE0 |
1826                                      GRC_LCLCTRL_GPIO_OE1 |
1827                                      GRC_LCLCTRL_GPIO_OE2 |
1828                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1829                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1830                                     100);
1831                 } else {
1832                         u32 no_gpio2;
1833                         u32 grc_local_ctrl = 0;
1834
1835                         if (tp_peer != tp &&
1836                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1837                                 return;
1838
1839                         /* Workaround to prevent overdrawing Amps. */
1840                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1841                             ASIC_REV_5714) {
1842                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1843                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1844                                             grc_local_ctrl, 100);
1845                         }
1846
1847                         /* On 5753 and variants, GPIO2 cannot be used. */
1848                         no_gpio2 = tp->nic_sram_data_cfg &
1849                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1850
1851                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1852                                          GRC_LCLCTRL_GPIO_OE1 |
1853                                          GRC_LCLCTRL_GPIO_OE2 |
1854                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1855                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1856                         if (no_gpio2) {
1857                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1858                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1859                         }
1860                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1861                                                     grc_local_ctrl, 100);
1862
1863                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1864
1865                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1866                                                     grc_local_ctrl, 100);
1867
1868                         if (!no_gpio2) {
1869                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1870                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1871                                             grc_local_ctrl, 100);
1872                         }
1873                 }
1874         } else {
1875                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1876                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1877                         if (tp_peer != tp &&
1878                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1879                                 return;
1880
1881                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1882                                     (GRC_LCLCTRL_GPIO_OE1 |
1883                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1884
1885                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1886                                     GRC_LCLCTRL_GPIO_OE1, 100);
1887
1888                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1889                                     (GRC_LCLCTRL_GPIO_OE1 |
1890                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1891                 }
1892         }
1893 }
1894
1895 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1896 {
1897         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1898                 return 1;
1899         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1900                 if (speed != SPEED_10)
1901                         return 1;
1902         } else if (speed == SPEED_10)
1903                 return 1;
1904
1905         return 0;
1906 }
1907
1908 static int tg3_setup_phy(struct tg3 *, int);
1909
1910 #define RESET_KIND_SHUTDOWN     0
1911 #define RESET_KIND_INIT         1
1912 #define RESET_KIND_SUSPEND      2
1913
1914 static void tg3_write_sig_post_reset(struct tg3 *, int);
1915 static int tg3_halt_cpu(struct tg3 *, u32);
1916 static int tg3_nvram_lock(struct tg3 *);
1917 static void tg3_nvram_unlock(struct tg3 *);
1918
1919 static void tg3_power_down_phy(struct tg3 *tp)
1920 {
1921         u32 val;
1922
1923         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1924                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1925                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1926                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1927
1928                         sg_dig_ctrl |=
1929                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1930                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1931                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1932                 }
1933                 return;
1934         }
1935
1936         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1937                 tg3_bmcr_reset(tp);
1938                 val = tr32(GRC_MISC_CFG);
1939                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1940                 udelay(40);
1941                 return;
1942         } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1943                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1944                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1945                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1946         }
1947
1948         /* The PHY should not be powered down on some chips because
1949          * of bugs.
1950          */
1951         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1952             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1953             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1954              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1955                 return;
1956
1957         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1958                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1959                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1960                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1961                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1962         }
1963
1964         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1965 }
1966
1967 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1968 {
1969         u32 misc_host_ctrl;
1970         u16 power_control, power_caps;
1971         int pm = tp->pm_cap;
1972
1973         /* Make sure register accesses (indirect or otherwise)
1974          * will function correctly.
1975          */
1976         pci_write_config_dword(tp->pdev,
1977                                TG3PCI_MISC_HOST_CTRL,
1978                                tp->misc_host_ctrl);
1979
1980         pci_read_config_word(tp->pdev,
1981                              pm + PCI_PM_CTRL,
1982                              &power_control);
1983         power_control |= PCI_PM_CTRL_PME_STATUS;
1984         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1985         switch (state) {
1986         case PCI_D0:
1987                 power_control |= 0;
1988                 pci_write_config_word(tp->pdev,
1989                                       pm + PCI_PM_CTRL,
1990                                       power_control);
1991                 udelay(100);    /* Delay after power state change */
1992
1993                 /* Switch out of Vaux if it is a NIC */
1994                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1995                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1996
1997                 return 0;
1998
1999         case PCI_D1:
2000                 power_control |= 1;
2001                 break;
2002
2003         case PCI_D2:
2004                 power_control |= 2;
2005                 break;
2006
2007         case PCI_D3hot:
2008                 power_control |= 3;
2009                 break;
2010
2011         default:
2012                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
2013                        "requested.\n",
2014                        tp->dev->name, state);
2015                 return -EINVAL;
2016         }
2017
2018         power_control |= PCI_PM_CTRL_PME_ENABLE;
2019
2020         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2021         tw32(TG3PCI_MISC_HOST_CTRL,
2022              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2023
2024         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2025                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2026                     !tp->link_config.phy_is_low_power) {
2027                         struct phy_device *phydev;
2028                         u32 advertising;
2029
2030                         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
2031
2032                         tp->link_config.phy_is_low_power = 1;
2033
2034                         tp->link_config.orig_speed = phydev->speed;
2035                         tp->link_config.orig_duplex = phydev->duplex;
2036                         tp->link_config.orig_autoneg = phydev->autoneg;
2037                         tp->link_config.orig_advertising = phydev->advertising;
2038
2039                         advertising = ADVERTISED_TP |
2040                                       ADVERTISED_Pause |
2041                                       ADVERTISED_Autoneg |
2042                                       ADVERTISED_10baseT_Half;
2043
2044                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2045                             (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2046                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2047                                         advertising |=
2048                                                 ADVERTISED_100baseT_Half |
2049                                                 ADVERTISED_100baseT_Full |
2050                                                 ADVERTISED_10baseT_Full;
2051                                 else
2052                                         advertising |= ADVERTISED_10baseT_Full;
2053                         }
2054
2055                         phydev->advertising = advertising;
2056
2057                         phy_start_aneg(phydev);
2058                 }
2059         } else {
2060                 if (tp->link_config.phy_is_low_power == 0) {
2061                         tp->link_config.phy_is_low_power = 1;
2062                         tp->link_config.orig_speed = tp->link_config.speed;
2063                         tp->link_config.orig_duplex = tp->link_config.duplex;
2064                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2065                 }
2066
2067                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2068                         tp->link_config.speed = SPEED_10;
2069                         tp->link_config.duplex = DUPLEX_HALF;
2070                         tp->link_config.autoneg = AUTONEG_ENABLE;
2071                         tg3_setup_phy(tp, 0);
2072                 }
2073         }
2074
2075         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2076                 u32 val;
2077
2078                 val = tr32(GRC_VCPU_EXT_CTRL);
2079                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2080         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2081                 int i;
2082                 u32 val;
2083
2084                 for (i = 0; i < 200; i++) {
2085                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2086                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2087                                 break;
2088                         msleep(1);
2089                 }
2090         }
2091         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2092                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2093                                                      WOL_DRV_STATE_SHUTDOWN |
2094                                                      WOL_DRV_WOL |
2095                                                      WOL_SET_MAGIC_PKT);
2096
2097         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
2098
2099         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2100                 u32 mac_mode;
2101
2102                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2103                         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2104                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2105                                 udelay(40);
2106                         }
2107
2108                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2109                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2110                         else
2111                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2112
2113                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2114                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2115                             ASIC_REV_5700) {
2116                                 u32 speed = (tp->tg3_flags &
2117                                              TG3_FLAG_WOL_SPEED_100MB) ?
2118                                              SPEED_100 : SPEED_10;
2119                                 if (tg3_5700_link_polarity(tp, speed))
2120                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2121                                 else
2122                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2123                         }
2124                 } else {
2125                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2126                 }
2127
2128                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2129                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2130
2131                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
2132                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
2133                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2134
2135                 tw32_f(MAC_MODE, mac_mode);
2136                 udelay(100);
2137
2138                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2139                 udelay(10);
2140         }
2141
2142         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2143             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2144              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2145                 u32 base_val;
2146
2147                 base_val = tp->pci_clock_ctrl;
2148                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2149                              CLOCK_CTRL_TXCLK_DISABLE);
2150
2151                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2152                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2153         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2154                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2155                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2156                 /* do nothing */
2157         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2158                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2159                 u32 newbits1, newbits2;
2160
2161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2162                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2163                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2164                                     CLOCK_CTRL_TXCLK_DISABLE |
2165                                     CLOCK_CTRL_ALTCLK);
2166                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2167                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2168                         newbits1 = CLOCK_CTRL_625_CORE;
2169                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2170                 } else {
2171                         newbits1 = CLOCK_CTRL_ALTCLK;
2172                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2173                 }
2174
2175                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2176                             40);
2177
2178                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2179                             40);
2180
2181                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2182                         u32 newbits3;
2183
2184                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2185                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2186                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2187                                             CLOCK_CTRL_TXCLK_DISABLE |
2188                                             CLOCK_CTRL_44MHZ_CORE);
2189                         } else {
2190                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2191                         }
2192
2193                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2194                                     tp->pci_clock_ctrl | newbits3, 40);
2195                 }
2196         }
2197
2198         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
2199             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2200             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2201                 tg3_power_down_phy(tp);
2202
2203         tg3_frob_aux_power(tp);
2204
2205         /* Workaround for unstable PLL clock */
2206         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2207             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2208                 u32 val = tr32(0x7d00);
2209
2210                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2211                 tw32(0x7d00, val);
2212                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2213                         int err;
2214
2215                         err = tg3_nvram_lock(tp);
2216                         tg3_halt_cpu(tp, RX_CPU_BASE);
2217                         if (!err)
2218                                 tg3_nvram_unlock(tp);
2219                 }
2220         }
2221
2222         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2223
2224         /* Finally, set the new power state. */
2225         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
2226         udelay(100);    /* Delay after power state change */
2227
2228         return 0;
2229 }
2230
2231 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2232 {
2233         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2234         case MII_TG3_AUX_STAT_10HALF:
2235                 *speed = SPEED_10;
2236                 *duplex = DUPLEX_HALF;
2237                 break;
2238
2239         case MII_TG3_AUX_STAT_10FULL:
2240                 *speed = SPEED_10;
2241                 *duplex = DUPLEX_FULL;
2242                 break;
2243
2244         case MII_TG3_AUX_STAT_100HALF:
2245                 *speed = SPEED_100;
2246                 *duplex = DUPLEX_HALF;
2247                 break;
2248
2249         case MII_TG3_AUX_STAT_100FULL:
2250                 *speed = SPEED_100;
2251                 *duplex = DUPLEX_FULL;
2252                 break;
2253
2254         case MII_TG3_AUX_STAT_1000HALF:
2255                 *speed = SPEED_1000;
2256                 *duplex = DUPLEX_HALF;
2257                 break;
2258
2259         case MII_TG3_AUX_STAT_1000FULL:
2260                 *speed = SPEED_1000;
2261                 *duplex = DUPLEX_FULL;
2262                 break;
2263
2264         default:
2265                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2266                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2267                                  SPEED_10;
2268                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2269                                   DUPLEX_HALF;
2270                         break;
2271                 }
2272                 *speed = SPEED_INVALID;
2273                 *duplex = DUPLEX_INVALID;
2274                 break;
2275         }
2276 }
2277
2278 static void tg3_phy_copper_begin(struct tg3 *tp)
2279 {
2280         u32 new_adv;
2281         int i;
2282
2283         if (tp->link_config.phy_is_low_power) {
2284                 /* Entering low power mode.  Disable gigabit and
2285                  * 100baseT advertisements.
2286                  */
2287                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2288
2289                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2290                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2291                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2292                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2293
2294                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2295         } else if (tp->link_config.speed == SPEED_INVALID) {
2296                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2297                         tp->link_config.advertising &=
2298                                 ~(ADVERTISED_1000baseT_Half |
2299                                   ADVERTISED_1000baseT_Full);
2300
2301                 new_adv = ADVERTISE_CSMA;
2302                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2303                         new_adv |= ADVERTISE_10HALF;
2304                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2305                         new_adv |= ADVERTISE_10FULL;
2306                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2307                         new_adv |= ADVERTISE_100HALF;
2308                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2309                         new_adv |= ADVERTISE_100FULL;
2310
2311                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2312
2313                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2314
2315                 if (tp->link_config.advertising &
2316                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2317                         new_adv = 0;
2318                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2319                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2320                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2321                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2322                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2323                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2324                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2325                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2326                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2327                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2328                 } else {
2329                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2330                 }
2331         } else {
2332                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2333                 new_adv |= ADVERTISE_CSMA;
2334
2335                 /* Asking for a specific link mode. */
2336                 if (tp->link_config.speed == SPEED_1000) {
2337                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2338
2339                         if (tp->link_config.duplex == DUPLEX_FULL)
2340                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2341                         else
2342                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2343                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2344                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2345                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2346                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2347                 } else {
2348                         if (tp->link_config.speed == SPEED_100) {
2349                                 if (tp->link_config.duplex == DUPLEX_FULL)
2350                                         new_adv |= ADVERTISE_100FULL;
2351                                 else
2352                                         new_adv |= ADVERTISE_100HALF;
2353                         } else {
2354                                 if (tp->link_config.duplex == DUPLEX_FULL)
2355                                         new_adv |= ADVERTISE_10FULL;
2356                                 else
2357                                         new_adv |= ADVERTISE_10HALF;
2358                         }
2359                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2360
2361                         new_adv = 0;
2362                 }
2363
2364                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2365         }
2366
2367         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2368             tp->link_config.speed != SPEED_INVALID) {
2369                 u32 bmcr, orig_bmcr;
2370
2371                 tp->link_config.active_speed = tp->link_config.speed;
2372                 tp->link_config.active_duplex = tp->link_config.duplex;
2373
2374                 bmcr = 0;
2375                 switch (tp->link_config.speed) {
2376                 default:
2377                 case SPEED_10:
2378                         break;
2379
2380                 case SPEED_100:
2381                         bmcr |= BMCR_SPEED100;
2382                         break;
2383
2384                 case SPEED_1000:
2385                         bmcr |= TG3_BMCR_SPEED1000;
2386                         break;
2387                 }
2388
2389                 if (tp->link_config.duplex == DUPLEX_FULL)
2390                         bmcr |= BMCR_FULLDPLX;
2391
2392                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2393                     (bmcr != orig_bmcr)) {
2394                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2395                         for (i = 0; i < 1500; i++) {
2396                                 u32 tmp;
2397
2398                                 udelay(10);
2399                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2400                                     tg3_readphy(tp, MII_BMSR, &tmp))
2401                                         continue;
2402                                 if (!(tmp & BMSR_LSTATUS)) {
2403                                         udelay(40);
2404                                         break;
2405                                 }
2406                         }
2407                         tg3_writephy(tp, MII_BMCR, bmcr);
2408                         udelay(40);
2409                 }
2410         } else {
2411                 tg3_writephy(tp, MII_BMCR,
2412                              BMCR_ANENABLE | BMCR_ANRESTART);
2413         }
2414 }
2415
2416 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2417 {
2418         int err;
2419
2420         /* Turn off tap power management. */
2421         /* Set Extended packet length bit */
2422         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2423
2424         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2425         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2426
2427         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2428         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2429
2430         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2431         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2432
2433         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2434         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2435
2436         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2437         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2438
2439         udelay(40);
2440
2441         return err;
2442 }
2443
2444 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2445 {
2446         u32 adv_reg, all_mask = 0;
2447
2448         if (mask & ADVERTISED_10baseT_Half)
2449                 all_mask |= ADVERTISE_10HALF;
2450         if (mask & ADVERTISED_10baseT_Full)
2451                 all_mask |= ADVERTISE_10FULL;
2452         if (mask & ADVERTISED_100baseT_Half)
2453                 all_mask |= ADVERTISE_100HALF;
2454         if (mask & ADVERTISED_100baseT_Full)
2455                 all_mask |= ADVERTISE_100FULL;
2456
2457         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2458                 return 0;
2459
2460         if ((adv_reg & all_mask) != all_mask)
2461                 return 0;
2462         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2463                 u32 tg3_ctrl;
2464
2465                 all_mask = 0;
2466                 if (mask & ADVERTISED_1000baseT_Half)
2467                         all_mask |= ADVERTISE_1000HALF;
2468                 if (mask & ADVERTISED_1000baseT_Full)
2469                         all_mask |= ADVERTISE_1000FULL;
2470
2471                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2472                         return 0;
2473
2474                 if ((tg3_ctrl & all_mask) != all_mask)
2475                         return 0;
2476         }
2477         return 1;
2478 }
2479
2480 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2481 {
2482         u32 curadv, reqadv;
2483
2484         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2485                 return 1;
2486
2487         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2488         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2489
2490         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2491                 if (curadv != reqadv)
2492                         return 0;
2493
2494                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2495                         tg3_readphy(tp, MII_LPA, rmtadv);
2496         } else {
2497                 /* Reprogram the advertisement register, even if it
2498                  * does not affect the current link.  If the link
2499                  * gets renegotiated in the future, we can save an
2500                  * additional renegotiation cycle by advertising
2501                  * it correctly in the first place.
2502                  */
2503                 if (curadv != reqadv) {
2504                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2505                                      ADVERTISE_PAUSE_ASYM);
2506                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2507                 }
2508         }
2509
2510         return 1;
2511 }
2512
2513 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2514 {
2515         int current_link_up;
2516         u32 bmsr, dummy;
2517         u32 lcl_adv, rmt_adv;
2518         u16 current_speed;
2519         u8 current_duplex;
2520         int i, err;
2521
2522         tw32(MAC_EVENT, 0);
2523
2524         tw32_f(MAC_STATUS,
2525              (MAC_STATUS_SYNC_CHANGED |
2526               MAC_STATUS_CFG_CHANGED |
2527               MAC_STATUS_MI_COMPLETION |
2528               MAC_STATUS_LNKSTATE_CHANGED));
2529         udelay(40);
2530
2531         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2532                 tw32_f(MAC_MI_MODE,
2533                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2534                 udelay(80);
2535         }
2536
2537         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2538
2539         /* Some third-party PHYs need to be reset on link going
2540          * down.
2541          */
2542         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2543              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2544              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2545             netif_carrier_ok(tp->dev)) {
2546                 tg3_readphy(tp, MII_BMSR, &bmsr);
2547                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2548                     !(bmsr & BMSR_LSTATUS))
2549                         force_reset = 1;
2550         }
2551         if (force_reset)
2552                 tg3_phy_reset(tp);
2553
2554         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2555                 tg3_readphy(tp, MII_BMSR, &bmsr);
2556                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2557                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2558                         bmsr = 0;
2559
2560                 if (!(bmsr & BMSR_LSTATUS)) {
2561                         err = tg3_init_5401phy_dsp(tp);
2562                         if (err)
2563                                 return err;
2564
2565                         tg3_readphy(tp, MII_BMSR, &bmsr);
2566                         for (i = 0; i < 1000; i++) {
2567                                 udelay(10);
2568                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2569                                     (bmsr & BMSR_LSTATUS)) {
2570                                         udelay(40);
2571                                         break;
2572                                 }
2573                         }
2574
2575                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2576                             !(bmsr & BMSR_LSTATUS) &&
2577                             tp->link_config.active_speed == SPEED_1000) {
2578                                 err = tg3_phy_reset(tp);
2579                                 if (!err)
2580                                         err = tg3_init_5401phy_dsp(tp);
2581                                 if (err)
2582                                         return err;
2583                         }
2584                 }
2585         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2586                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2587                 /* 5701 {A0,B0} CRC bug workaround */
2588                 tg3_writephy(tp, 0x15, 0x0a75);
2589                 tg3_writephy(tp, 0x1c, 0x8c68);
2590                 tg3_writephy(tp, 0x1c, 0x8d68);
2591                 tg3_writephy(tp, 0x1c, 0x8c68);
2592         }
2593
2594         /* Clear pending interrupts... */
2595         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2596         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2597
2598         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2599                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2600         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2601                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2602
2603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2604             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2605                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2606                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2607                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2608                 else
2609                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2610         }
2611
2612         current_link_up = 0;
2613         current_speed = SPEED_INVALID;
2614         current_duplex = DUPLEX_INVALID;
2615
2616         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2617                 u32 val;
2618
2619                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2620                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2621                 if (!(val & (1 << 10))) {
2622                         val |= (1 << 10);
2623                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2624                         goto relink;
2625                 }
2626         }
2627
2628         bmsr = 0;
2629         for (i = 0; i < 100; i++) {
2630                 tg3_readphy(tp, MII_BMSR, &bmsr);
2631                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2632                     (bmsr & BMSR_LSTATUS))
2633                         break;
2634                 udelay(40);
2635         }
2636
2637         if (bmsr & BMSR_LSTATUS) {
2638                 u32 aux_stat, bmcr;
2639
2640                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2641                 for (i = 0; i < 2000; i++) {
2642                         udelay(10);
2643                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2644                             aux_stat)
2645                                 break;
2646                 }
2647
2648                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2649                                              &current_speed,
2650                                              &current_duplex);
2651
2652                 bmcr = 0;
2653                 for (i = 0; i < 200; i++) {
2654                         tg3_readphy(tp, MII_BMCR, &bmcr);
2655                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2656                                 continue;
2657                         if (bmcr && bmcr != 0x7fff)
2658                                 break;
2659                         udelay(10);
2660                 }
2661
2662                 lcl_adv = 0;
2663                 rmt_adv = 0;
2664
2665                 tp->link_config.active_speed = current_speed;
2666                 tp->link_config.active_duplex = current_duplex;
2667
2668                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2669                         if ((bmcr & BMCR_ANENABLE) &&
2670                             tg3_copper_is_advertising_all(tp,
2671                                                 tp->link_config.advertising)) {
2672                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2673                                                                   &rmt_adv))
2674                                         current_link_up = 1;
2675                         }
2676                 } else {
2677                         if (!(bmcr & BMCR_ANENABLE) &&
2678                             tp->link_config.speed == current_speed &&
2679                             tp->link_config.duplex == current_duplex &&
2680                             tp->link_config.flowctrl ==
2681                             tp->link_config.active_flowctrl) {
2682                                 current_link_up = 1;
2683                         }
2684                 }
2685
2686                 if (current_link_up == 1 &&
2687                     tp->link_config.active_duplex == DUPLEX_FULL)
2688                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2689         }
2690
2691 relink:
2692         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2693                 u32 tmp;
2694
2695                 tg3_phy_copper_begin(tp);
2696
2697                 tg3_readphy(tp, MII_BMSR, &tmp);
2698                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2699                     (tmp & BMSR_LSTATUS))
2700                         current_link_up = 1;
2701         }
2702
2703         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2704         if (current_link_up == 1) {
2705                 if (tp->link_config.active_speed == SPEED_100 ||
2706                     tp->link_config.active_speed == SPEED_10)
2707                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2708                 else
2709                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2710         } else
2711                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2712
2713         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2714         if (tp->link_config.active_duplex == DUPLEX_HALF)
2715                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2716
2717         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2718                 if (current_link_up == 1 &&
2719                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2720                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2721                 else
2722                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2723         }
2724
2725         /* ??? Without this setting Netgear GA302T PHY does not
2726          * ??? send/receive packets...
2727          */
2728         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2729             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2730                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2731                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2732                 udelay(80);
2733         }
2734
2735         tw32_f(MAC_MODE, tp->mac_mode);
2736         udelay(40);
2737
2738         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2739                 /* Polled via timer. */
2740                 tw32_f(MAC_EVENT, 0);
2741         } else {
2742                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2743         }
2744         udelay(40);
2745
2746         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2747             current_link_up == 1 &&
2748             tp->link_config.active_speed == SPEED_1000 &&
2749             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2750              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2751                 udelay(120);
2752                 tw32_f(MAC_STATUS,
2753                      (MAC_STATUS_SYNC_CHANGED |
2754                       MAC_STATUS_CFG_CHANGED));
2755                 udelay(40);
2756                 tg3_write_mem(tp,
2757                               NIC_SRAM_FIRMWARE_MBOX,
2758                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2759         }
2760
2761         if (current_link_up != netif_carrier_ok(tp->dev)) {
2762                 if (current_link_up)
2763                         netif_carrier_on(tp->dev);
2764                 else
2765                         netif_carrier_off(tp->dev);
2766                 tg3_link_report(tp);
2767         }
2768
2769         return 0;
2770 }
2771
2772 struct tg3_fiber_aneginfo {
2773         int state;
2774 #define ANEG_STATE_UNKNOWN              0
2775 #define ANEG_STATE_AN_ENABLE            1
2776 #define ANEG_STATE_RESTART_INIT         2
2777 #define ANEG_STATE_RESTART              3
2778 #define ANEG_STATE_DISABLE_LINK_OK      4
2779 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2780 #define ANEG_STATE_ABILITY_DETECT       6
2781 #define ANEG_STATE_ACK_DETECT_INIT      7
2782 #define ANEG_STATE_ACK_DETECT           8
2783 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2784 #define ANEG_STATE_COMPLETE_ACK         10
2785 #define ANEG_STATE_IDLE_DETECT_INIT     11
2786 #define ANEG_STATE_IDLE_DETECT          12
2787 #define ANEG_STATE_LINK_OK              13
2788 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2789 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2790
2791         u32 flags;
2792 #define MR_AN_ENABLE            0x00000001
2793 #define MR_RESTART_AN           0x00000002
2794 #define MR_AN_COMPLETE          0x00000004
2795 #define MR_PAGE_RX              0x00000008
2796 #define MR_NP_LOADED            0x00000010
2797 #define MR_TOGGLE_TX            0x00000020
2798 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2799 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2800 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2801 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2802 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2803 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2804 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2805 #define MR_TOGGLE_RX            0x00002000
2806 #define MR_NP_RX                0x00004000
2807
2808 #define MR_LINK_OK              0x80000000
2809
2810         unsigned long link_time, cur_time;
2811
2812         u32 ability_match_cfg;
2813         int ability_match_count;
2814
2815         char ability_match, idle_match, ack_match;
2816
2817         u32 txconfig, rxconfig;
2818 #define ANEG_CFG_NP             0x00000080
2819 #define ANEG_CFG_ACK            0x00000040
2820 #define ANEG_CFG_RF2            0x00000020
2821 #define ANEG_CFG_RF1            0x00000010
2822 #define ANEG_CFG_PS2            0x00000001
2823 #define ANEG_CFG_PS1            0x00008000
2824 #define ANEG_CFG_HD             0x00004000
2825 #define ANEG_CFG_FD             0x00002000
2826 #define ANEG_CFG_INVAL          0x00001f06
2827
2828 };
2829 #define ANEG_OK         0
2830 #define ANEG_DONE       1
2831 #define ANEG_TIMER_ENAB 2
2832 #define ANEG_FAILED     -1
2833
2834 #define ANEG_STATE_SETTLE_TIME  10000
2835
2836 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2837                                    struct tg3_fiber_aneginfo *ap)
2838 {
2839         u16 flowctrl;
2840         unsigned long delta;
2841         u32 rx_cfg_reg;
2842         int ret;
2843
2844         if (ap->state == ANEG_STATE_UNKNOWN) {
2845                 ap->rxconfig = 0;
2846                 ap->link_time = 0;
2847                 ap->cur_time = 0;
2848                 ap->ability_match_cfg = 0;
2849                 ap->ability_match_count = 0;
2850                 ap->ability_match = 0;
2851                 ap->idle_match = 0;
2852                 ap->ack_match = 0;
2853         }
2854         ap->cur_time++;
2855
2856         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2857                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2858
2859                 if (rx_cfg_reg != ap->ability_match_cfg) {
2860                         ap->ability_match_cfg = rx_cfg_reg;
2861                         ap->ability_match = 0;
2862                         ap->ability_match_count = 0;
2863                 } else {
2864                         if (++ap->ability_match_count > 1) {
2865                                 ap->ability_match = 1;
2866                                 ap->ability_match_cfg = rx_cfg_reg;
2867                         }
2868                 }
2869                 if (rx_cfg_reg & ANEG_CFG_ACK)
2870                         ap->ack_match = 1;
2871                 else
2872                         ap->ack_match = 0;
2873
2874                 ap->idle_match = 0;
2875         } else {
2876                 ap->idle_match = 1;
2877                 ap->ability_match_cfg = 0;
2878                 ap->ability_match_count = 0;
2879                 ap->ability_match = 0;
2880                 ap->ack_match = 0;
2881
2882                 rx_cfg_reg = 0;
2883         }
2884
2885         ap->rxconfig = rx_cfg_reg;
2886         ret = ANEG_OK;
2887
2888         switch(ap->state) {
2889         case ANEG_STATE_UNKNOWN:
2890                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2891                         ap->state = ANEG_STATE_AN_ENABLE;
2892
2893                 /* fallthru */
2894         case ANEG_STATE_AN_ENABLE:
2895                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2896                 if (ap->flags & MR_AN_ENABLE) {
2897                         ap->link_time = 0;
2898                         ap->cur_time = 0;
2899                         ap->ability_match_cfg = 0;
2900                         ap->ability_match_count = 0;
2901                         ap->ability_match = 0;
2902                         ap->idle_match = 0;
2903                         ap->ack_match = 0;
2904
2905                         ap->state = ANEG_STATE_RESTART_INIT;
2906                 } else {
2907                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2908                 }
2909                 break;
2910
2911         case ANEG_STATE_RESTART_INIT:
2912                 ap->link_time = ap->cur_time;
2913                 ap->flags &= ~(MR_NP_LOADED);
2914                 ap->txconfig = 0;
2915                 tw32(MAC_TX_AUTO_NEG, 0);
2916                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2917                 tw32_f(MAC_MODE, tp->mac_mode);
2918                 udelay(40);
2919
2920                 ret = ANEG_TIMER_ENAB;
2921                 ap->state = ANEG_STATE_RESTART;
2922
2923                 /* fallthru */
2924         case ANEG_STATE_RESTART:
2925                 delta = ap->cur_time - ap->link_time;
2926                 if (delta > ANEG_STATE_SETTLE_TIME) {
2927                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2928                 } else {
2929                         ret = ANEG_TIMER_ENAB;
2930                 }
2931                 break;
2932
2933         case ANEG_STATE_DISABLE_LINK_OK:
2934                 ret = ANEG_DONE;
2935                 break;
2936
2937         case ANEG_STATE_ABILITY_DETECT_INIT:
2938                 ap->flags &= ~(MR_TOGGLE_TX);
2939                 ap->txconfig = ANEG_CFG_FD;
2940                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2941                 if (flowctrl & ADVERTISE_1000XPAUSE)
2942                         ap->txconfig |= ANEG_CFG_PS1;
2943                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2944                         ap->txconfig |= ANEG_CFG_PS2;
2945                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2946                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2947                 tw32_f(MAC_MODE, tp->mac_mode);
2948                 udelay(40);
2949
2950                 ap->state = ANEG_STATE_ABILITY_DETECT;
2951                 break;
2952
2953         case ANEG_STATE_ABILITY_DETECT:
2954                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2955                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2956                 }
2957                 break;
2958
2959         case ANEG_STATE_ACK_DETECT_INIT:
2960                 ap->txconfig |= ANEG_CFG_ACK;
2961                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2962                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2963                 tw32_f(MAC_MODE, tp->mac_mode);
2964                 udelay(40);
2965
2966                 ap->state = ANEG_STATE_ACK_DETECT;
2967
2968                 /* fallthru */
2969         case ANEG_STATE_ACK_DETECT:
2970                 if (ap->ack_match != 0) {
2971                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2972                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2973                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2974                         } else {
2975                                 ap->state = ANEG_STATE_AN_ENABLE;
2976                         }
2977                 } else if (ap->ability_match != 0 &&
2978                            ap->rxconfig == 0) {
2979                         ap->state = ANEG_STATE_AN_ENABLE;
2980                 }
2981                 break;
2982
2983         case ANEG_STATE_COMPLETE_ACK_INIT:
2984                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2985                         ret = ANEG_FAILED;
2986                         break;
2987                 }
2988                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2989                                MR_LP_ADV_HALF_DUPLEX |
2990                                MR_LP_ADV_SYM_PAUSE |
2991                                MR_LP_ADV_ASYM_PAUSE |
2992                                MR_LP_ADV_REMOTE_FAULT1 |
2993                                MR_LP_ADV_REMOTE_FAULT2 |
2994                                MR_LP_ADV_NEXT_PAGE |
2995                                MR_TOGGLE_RX |
2996                                MR_NP_RX);
2997                 if (ap->rxconfig & ANEG_CFG_FD)
2998                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2999                 if (ap->rxconfig & ANEG_CFG_HD)
3000                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3001                 if (ap->rxconfig & ANEG_CFG_PS1)
3002                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3003                 if (ap->rxconfig & ANEG_CFG_PS2)
3004                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3005                 if (ap->rxconfig & ANEG_CFG_RF1)
3006                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3007                 if (ap->rxconfig & ANEG_CFG_RF2)
3008                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3009                 if (ap->rxconfig & ANEG_CFG_NP)
3010                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3011
3012                 ap->link_time = ap->cur_time;
3013
3014                 ap->flags ^= (MR_TOGGLE_TX);
3015                 if (ap->rxconfig & 0x0008)
3016                         ap->flags |= MR_TOGGLE_RX;
3017                 if (ap->rxconfig & ANEG_CFG_NP)
3018                         ap->flags |= MR_NP_RX;
3019                 ap->flags |= MR_PAGE_RX;
3020
3021                 ap->state = ANEG_STATE_COMPLETE_ACK;
3022                 ret = ANEG_TIMER_ENAB;
3023                 break;
3024
3025         case ANEG_STATE_COMPLETE_ACK:
3026                 if (ap->ability_match != 0 &&
3027                     ap->rxconfig == 0) {
3028                         ap->state = ANEG_STATE_AN_ENABLE;
3029                         break;
3030                 }
3031                 delta = ap->cur_time - ap->link_time;
3032                 if (delta > ANEG_STATE_SETTLE_TIME) {
3033                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3034                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3035                         } else {
3036                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3037                                     !(ap->flags & MR_NP_RX)) {
3038                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3039                                 } else {
3040                                         ret = ANEG_FAILED;
3041                                 }
3042                         }
3043                 }
3044                 break;
3045
3046         case ANEG_STATE_IDLE_DETECT_INIT:
3047                 ap->link_time = ap->cur_time;
3048                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3049                 tw32_f(MAC_MODE, tp->mac_mode);
3050                 udelay(40);
3051
3052                 ap->state = ANEG_STATE_IDLE_DETECT;
3053                 ret = ANEG_TIMER_ENAB;
3054                 break;
3055
3056         case ANEG_STATE_IDLE_DETECT:
3057                 if (ap->ability_match != 0 &&
3058                     ap->rxconfig == 0) {
3059                         ap->state = ANEG_STATE_AN_ENABLE;
3060                         break;
3061                 }
3062                 delta = ap->cur_time - ap->link_time;
3063                 if (delta > ANEG_STATE_SETTLE_TIME) {
3064                         /* XXX another gem from the Broadcom driver :( */
3065                         ap->state = ANEG_STATE_LINK_OK;
3066                 }
3067                 break;
3068
3069         case ANEG_STATE_LINK_OK:
3070                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3071                 ret = ANEG_DONE;
3072                 break;
3073
3074         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3075                 /* ??? unimplemented */
3076                 break;
3077
3078         case ANEG_STATE_NEXT_PAGE_WAIT:
3079                 /* ??? unimplemented */
3080                 break;
3081
3082         default:
3083                 ret = ANEG_FAILED;
3084                 break;
3085         }
3086
3087         return ret;
3088 }
3089
3090 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3091 {
3092         int res = 0;
3093         struct tg3_fiber_aneginfo aninfo;
3094         int status = ANEG_FAILED;
3095         unsigned int tick;
3096         u32 tmp;
3097
3098         tw32_f(MAC_TX_AUTO_NEG, 0);
3099
3100         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3101         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3102         udelay(40);
3103
3104         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3105         udelay(40);
3106
3107         memset(&aninfo, 0, sizeof(aninfo));
3108         aninfo.flags |= MR_AN_ENABLE;
3109         aninfo.state = ANEG_STATE_UNKNOWN;
3110         aninfo.cur_time = 0;
3111         tick = 0;
3112         while (++tick < 195000) {
3113                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3114                 if (status == ANEG_DONE || status == ANEG_FAILED)
3115                         break;
3116
3117                 udelay(1);
3118         }
3119
3120         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3121         tw32_f(MAC_MODE, tp->mac_mode);
3122         udelay(40);
3123
3124         *txflags = aninfo.txconfig;
3125         *rxflags = aninfo.flags;
3126
3127         if (status == ANEG_DONE &&
3128             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3129                              MR_LP_ADV_FULL_DUPLEX)))
3130                 res = 1;
3131
3132         return res;
3133 }
3134
3135 static void tg3_init_bcm8002(struct tg3 *tp)
3136 {
3137         u32 mac_status = tr32(MAC_STATUS);
3138         int i;
3139
3140         /* Reset when initting first time or we have a link. */
3141         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3142             !(mac_status & MAC_STATUS_PCS_SYNCED))
3143                 return;
3144
3145         /* Set PLL lock range. */
3146         tg3_writephy(tp, 0x16, 0x8007);
3147
3148         /* SW reset */
3149         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3150
3151         /* Wait for reset to complete. */
3152         /* XXX schedule_timeout() ... */
3153         for (i = 0; i < 500; i++)
3154                 udelay(10);
3155
3156         /* Config mode; select PMA/Ch 1 regs. */
3157         tg3_writephy(tp, 0x10, 0x8411);
3158
3159         /* Enable auto-lock and comdet, select txclk for tx. */
3160         tg3_writephy(tp, 0x11, 0x0a10);
3161
3162         tg3_writephy(tp, 0x18, 0x00a0);
3163         tg3_writephy(tp, 0x16, 0x41ff);
3164
3165         /* Assert and deassert POR. */
3166         tg3_writephy(tp, 0x13, 0x0400);
3167         udelay(40);
3168         tg3_writephy(tp, 0x13, 0x0000);
3169
3170         tg3_writephy(tp, 0x11, 0x0a50);
3171         udelay(40);
3172         tg3_writephy(tp, 0x11, 0x0a10);
3173
3174         /* Wait for signal to stabilize */
3175         /* XXX schedule_timeout() ... */
3176         for (i = 0; i < 15000; i++)
3177                 udelay(10);
3178
3179         /* Deselect the channel register so we can read the PHYID
3180          * later.
3181          */
3182         tg3_writephy(tp, 0x10, 0x8011);
3183 }
3184
3185 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3186 {
3187         u16 flowctrl;
3188         u32 sg_dig_ctrl, sg_dig_status;
3189         u32 serdes_cfg, expected_sg_dig_ctrl;
3190         int workaround, port_a;
3191         int current_link_up;
3192
3193         serdes_cfg = 0;
3194         expected_sg_dig_ctrl = 0;
3195         workaround = 0;
3196         port_a = 1;
3197         current_link_up = 0;
3198
3199         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3200             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3201                 workaround = 1;
3202                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3203                         port_a = 0;
3204
3205                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3206                 /* preserve bits 20-23 for voltage regulator */
3207                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3208         }
3209
3210         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3211
3212         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3213                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3214                         if (workaround) {
3215                                 u32 val = serdes_cfg;
3216
3217                                 if (port_a)
3218                                         val |= 0xc010000;
3219                                 else
3220                                         val |= 0x4010000;
3221                                 tw32_f(MAC_SERDES_CFG, val);
3222                         }
3223
3224                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3225                 }
3226                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3227                         tg3_setup_flow_control(tp, 0, 0);
3228                         current_link_up = 1;
3229                 }
3230                 goto out;
3231         }
3232
3233         /* Want auto-negotiation.  */
3234         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3235
3236         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3237         if (flowctrl & ADVERTISE_1000XPAUSE)
3238                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3239         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3240                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3241
3242         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3243                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3244                     tp->serdes_counter &&
3245                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3246                                     MAC_STATUS_RCVD_CFG)) ==
3247                      MAC_STATUS_PCS_SYNCED)) {
3248                         tp->serdes_counter--;
3249                         current_link_up = 1;
3250                         goto out;
3251                 }
3252 restart_autoneg:
3253                 if (workaround)
3254                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3255                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3256                 udelay(5);
3257                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3258
3259                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3260                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3261         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3262                                  MAC_STATUS_SIGNAL_DET)) {
3263                 sg_dig_status = tr32(SG_DIG_STATUS);
3264                 mac_status = tr32(MAC_STATUS);
3265
3266                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3267                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3268                         u32 local_adv = 0, remote_adv = 0;
3269
3270                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3271                                 local_adv |= ADVERTISE_1000XPAUSE;
3272                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3273                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3274
3275                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3276                                 remote_adv |= LPA_1000XPAUSE;
3277                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3278                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3279
3280                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3281                         current_link_up = 1;
3282                         tp->serdes_counter = 0;
3283                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3284                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3285                         if (tp->serdes_counter)
3286                                 tp->serdes_counter--;
3287                         else {
3288                                 if (workaround) {
3289                                         u32 val = serdes_cfg;
3290
3291                                         if (port_a)
3292                                                 val |= 0xc010000;
3293                                         else
3294                                                 val |= 0x4010000;
3295
3296                                         tw32_f(MAC_SERDES_CFG, val);
3297                                 }
3298
3299                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3300                                 udelay(40);
3301
3302                                 /* Link parallel detection - link is up */
3303                                 /* only if we have PCS_SYNC and not */
3304                                 /* receiving config code words */
3305                                 mac_status = tr32(MAC_STATUS);
3306                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3307                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3308                                         tg3_setup_flow_control(tp, 0, 0);
3309                                         current_link_up = 1;
3310                                         tp->tg3_flags2 |=
3311                                                 TG3_FLG2_PARALLEL_DETECT;
3312                                         tp->serdes_counter =
3313                                                 SERDES_PARALLEL_DET_TIMEOUT;
3314                                 } else
3315                                         goto restart_autoneg;
3316                         }
3317                 }
3318         } else {
3319                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3320                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3321         }
3322
3323 out:
3324         return current_link_up;
3325 }
3326
3327 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3328 {
3329         int current_link_up = 0;
3330
3331         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3332                 goto out;
3333
3334         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3335                 u32 txflags, rxflags;
3336                 int i;
3337
3338                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3339                         u32 local_adv = 0, remote_adv = 0;
3340
3341                         if (txflags & ANEG_CFG_PS1)
3342                                 local_adv |= ADVERTISE_1000XPAUSE;
3343                         if (txflags & ANEG_CFG_PS2)
3344                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3345
3346                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3347                                 remote_adv |= LPA_1000XPAUSE;
3348                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3349                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3350
3351                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3352
3353                         current_link_up = 1;
3354                 }
3355                 for (i = 0; i < 30; i++) {
3356                         udelay(20);
3357                         tw32_f(MAC_STATUS,
3358                                (MAC_STATUS_SYNC_CHANGED |
3359                                 MAC_STATUS_CFG_CHANGED));
3360                         udelay(40);
3361                         if ((tr32(MAC_STATUS) &
3362                              (MAC_STATUS_SYNC_CHANGED |
3363                               MAC_STATUS_CFG_CHANGED)) == 0)
3364                                 break;
3365                 }
3366
3367                 mac_status = tr32(MAC_STATUS);
3368                 if (current_link_up == 0 &&
3369                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3370                     !(mac_status & MAC_STATUS_RCVD_CFG))
3371                         current_link_up = 1;
3372         } else {
3373                 tg3_setup_flow_control(tp, 0, 0);
3374
3375                 /* Forcing 1000FD link up. */
3376                 current_link_up = 1;
3377
3378                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3379                 udelay(40);
3380
3381                 tw32_f(MAC_MODE, tp->mac_mode);
3382                 udelay(40);
3383         }
3384
3385 out:
3386         return current_link_up;
3387 }
3388
3389 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3390 {
3391         u32 orig_pause_cfg;
3392         u16 orig_active_speed;
3393         u8 orig_active_duplex;
3394         u32 mac_status;
3395         int current_link_up;
3396         int i;
3397
3398         orig_pause_cfg = tp->link_config.active_flowctrl;
3399         orig_active_speed = tp->link_config.active_speed;
3400         orig_active_duplex = tp->link_config.active_duplex;
3401
3402         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3403             netif_carrier_ok(tp->dev) &&
3404             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3405                 mac_status = tr32(MAC_STATUS);
3406                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3407                                MAC_STATUS_SIGNAL_DET |
3408                                MAC_STATUS_CFG_CHANGED |
3409                                MAC_STATUS_RCVD_CFG);
3410                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3411                                    MAC_STATUS_SIGNAL_DET)) {
3412                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3413                                             MAC_STATUS_CFG_CHANGED));
3414                         return 0;
3415                 }
3416         }
3417
3418         tw32_f(MAC_TX_AUTO_NEG, 0);
3419
3420         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3421         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3422         tw32_f(MAC_MODE, tp->mac_mode);
3423         udelay(40);
3424
3425         if (tp->phy_id == PHY_ID_BCM8002)
3426                 tg3_init_bcm8002(tp);
3427
3428         /* Enable link change event even when serdes polling.  */
3429         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3430         udelay(40);
3431
3432         current_link_up = 0;
3433         mac_status = tr32(MAC_STATUS);
3434
3435         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3436                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3437         else
3438                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3439
3440         tp->hw_status->status =
3441                 (SD_STATUS_UPDATED |
3442                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3443
3444         for (i = 0; i < 100; i++) {
3445                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3446                                     MAC_STATUS_CFG_CHANGED));
3447                 udelay(5);
3448                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3449                                          MAC_STATUS_CFG_CHANGED |
3450                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3451                         break;
3452         }
3453
3454         mac_status = tr32(MAC_STATUS);
3455         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3456                 current_link_up = 0;
3457                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3458                     tp->serdes_counter == 0) {
3459                         tw32_f(MAC_MODE, (tp->mac_mode |
3460                                           MAC_MODE_SEND_CONFIGS));
3461                         udelay(1);
3462                         tw32_f(MAC_MODE, tp->mac_mode);
3463                 }
3464         }
3465
3466         if (current_link_up == 1) {
3467                 tp->link_config.active_speed = SPEED_1000;
3468                 tp->link_config.active_duplex = DUPLEX_FULL;
3469                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3470                                     LED_CTRL_LNKLED_OVERRIDE |
3471                                     LED_CTRL_1000MBPS_ON));
3472         } else {
3473                 tp->link_config.active_speed = SPEED_INVALID;
3474                 tp->link_config.active_duplex = DUPLEX_INVALID;
3475                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3476                                     LED_CTRL_LNKLED_OVERRIDE |
3477                                     LED_CTRL_TRAFFIC_OVERRIDE));
3478         }
3479
3480         if (current_link_up != netif_carrier_ok(tp->dev)) {
3481                 if (current_link_up)
3482                         netif_carrier_on(tp->dev);
3483                 else
3484                         netif_carrier_off(tp->dev);
3485                 tg3_link_report(tp);
3486         } else {
3487                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3488                 if (orig_pause_cfg != now_pause_cfg ||
3489                     orig_active_speed != tp->link_config.active_speed ||
3490                     orig_active_duplex != tp->link_config.active_duplex)
3491                         tg3_link_report(tp);
3492         }
3493
3494         return 0;
3495 }
3496
3497 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3498 {
3499         int current_link_up, err = 0;
3500         u32 bmsr, bmcr;
3501         u16 current_speed;
3502         u8 current_duplex;
3503         u32 local_adv, remote_adv;
3504
3505         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3506         tw32_f(MAC_MODE, tp->mac_mode);
3507         udelay(40);
3508
3509         tw32(MAC_EVENT, 0);
3510
3511         tw32_f(MAC_STATUS,
3512              (MAC_STATUS_SYNC_CHANGED |
3513               MAC_STATUS_CFG_CHANGED |
3514               MAC_STATUS_MI_COMPLETION |
3515               MAC_STATUS_LNKSTATE_CHANGED));
3516         udelay(40);
3517
3518         if (force_reset)
3519                 tg3_phy_reset(tp);
3520
3521         current_link_up = 0;
3522         current_speed = SPEED_INVALID;
3523         current_duplex = DUPLEX_INVALID;
3524
3525         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3526         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3527         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3528                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3529                         bmsr |= BMSR_LSTATUS;
3530                 else
3531                         bmsr &= ~BMSR_LSTATUS;
3532         }
3533
3534         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3535
3536         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3537             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3538              tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
3539                 /* do nothing, just check for link up at the end */
3540         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3541                 u32 adv, new_adv;
3542
3543                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3544                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3545                                   ADVERTISE_1000XPAUSE |
3546                                   ADVERTISE_1000XPSE_ASYM |
3547                                   ADVERTISE_SLCT);
3548
3549                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3550
3551                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3552                         new_adv |= ADVERTISE_1000XHALF;
3553                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3554                         new_adv |= ADVERTISE_1000XFULL;
3555
3556                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3557                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3558                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3559                         tg3_writephy(tp, MII_BMCR, bmcr);
3560
3561                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3562                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3563                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3564
3565                         return err;
3566                 }
3567         } else {
3568                 u32 new_bmcr;
3569
3570                 bmcr &= ~BMCR_SPEED1000;
3571                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3572
3573                 if (tp->link_config.duplex == DUPLEX_FULL)
3574                         new_bmcr |= BMCR_FULLDPLX;
3575
3576                 if (new_bmcr != bmcr) {
3577                         /* BMCR_SPEED1000 is a reserved bit that needs
3578                          * to be set on write.
3579                          */
3580                         new_bmcr |= BMCR_SPEED1000;
3581
3582                         /* Force a linkdown */
3583                         if (netif_carrier_ok(tp->dev)) {
3584                                 u32 adv;
3585
3586                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3587                                 adv &= ~(ADVERTISE_1000XFULL |
3588                                          ADVERTISE_1000XHALF |
3589                                          ADVERTISE_SLCT);
3590                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3591                                 tg3_writephy(tp, MII_BMCR, bmcr |
3592                                                            BMCR_ANRESTART |
3593                                                            BMCR_ANENABLE);
3594                                 udelay(10);
3595                                 netif_carrier_off(tp->dev);
3596                         }
3597                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3598                         bmcr = new_bmcr;
3599                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3600                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3601                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3602                             ASIC_REV_5714) {
3603                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3604                                         bmsr |= BMSR_LSTATUS;
3605                                 else
3606                                         bmsr &= ~BMSR_LSTATUS;
3607                         }
3608                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3609                 }
3610         }
3611
3612         if (bmsr & BMSR_LSTATUS) {
3613                 current_speed = SPEED_1000;
3614                 current_link_up = 1;
3615                 if (bmcr & BMCR_FULLDPLX)
3616                         current_duplex = DUPLEX_FULL;
3617                 else
3618                         current_duplex = DUPLEX_HALF;
3619
3620                 local_adv = 0;
3621                 remote_adv = 0;
3622
3623                 if (bmcr & BMCR_ANENABLE) {
3624                         u32 common;
3625
3626                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3627                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3628                         common = local_adv & remote_adv;
3629                         if (common & (ADVERTISE_1000XHALF |
3630                                       ADVERTISE_1000XFULL)) {
3631                                 if (common & ADVERTISE_1000XFULL)
3632                                         current_duplex = DUPLEX_FULL;
3633                                 else
3634                                         current_duplex = DUPLEX_HALF;
3635                         }
3636                         else
3637                                 current_link_up = 0;
3638                 }
3639         }
3640
3641         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3642                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3643
3644         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3645         if (tp->link_config.active_duplex == DUPLEX_HALF)
3646                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3647
3648         tw32_f(MAC_MODE, tp->mac_mode);
3649         udelay(40);
3650
3651         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3652
3653         tp->link_config.active_speed = current_speed;
3654         tp->link_config.active_duplex = current_duplex;
3655
3656         if (current_link_up != netif_carrier_ok(tp->dev)) {
3657                 if (current_link_up)
3658                         netif_carrier_on(tp->dev);
3659                 else {
3660                         netif_carrier_off(tp->dev);
3661                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3662                 }
3663                 tg3_link_report(tp);
3664         }
3665         return err;
3666 }
3667
3668 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3669 {
3670         if (tp->serdes_counter) {
3671                 /* Give autoneg time to complete. */
3672                 tp->serdes_counter--;
3673                 return;
3674         }
3675         if (!netif_carrier_ok(tp->dev) &&
3676             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3677                 u32 bmcr;
3678
3679                 tg3_readphy(tp, MII_BMCR, &bmcr);
3680                 if (bmcr & BMCR_ANENABLE) {
3681                         u32 phy1, phy2;
3682
3683                         /* Select shadow register 0x1f */
3684                         tg3_writephy(tp, 0x1c, 0x7c00);
3685                         tg3_readphy(tp, 0x1c, &phy1);
3686
3687                         /* Select expansion interrupt status register */
3688                         tg3_writephy(tp, 0x17, 0x0f01);
3689                         tg3_readphy(tp, 0x15, &phy2);
3690                         tg3_readphy(tp, 0x15, &phy2);
3691
3692                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3693                                 /* We have signal detect and not receiving
3694                                  * config code words, link is up by parallel
3695                                  * detection.
3696                                  */
3697
3698                                 bmcr &= ~BMCR_ANENABLE;
3699                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3700                                 tg3_writephy(tp, MII_BMCR, bmcr);
3701                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3702                         }
3703                 }
3704         }
3705         else if (netif_carrier_ok(tp->dev) &&
3706                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3707                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3708                 u32 phy2;
3709
3710                 /* Select expansion interrupt status register */
3711                 tg3_writephy(tp, 0x17, 0x0f01);
3712                 tg3_readphy(tp, 0x15, &phy2);
3713                 if (phy2 & 0x20) {
3714                         u32 bmcr;
3715
3716                         /* Config code words received, turn on autoneg. */
3717                         tg3_readphy(tp, MII_BMCR, &bmcr);
3718                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3719
3720                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3721
3722                 }
3723         }
3724 }
3725
3726 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3727 {
3728         int err;
3729
3730         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3731                 err = tg3_setup_fiber_phy(tp, force_reset);
3732         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3733                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3734         } else {
3735                 err = tg3_setup_copper_phy(tp, force_reset);
3736         }
3737
3738         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3739             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3740                 u32 val, scale;
3741
3742                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3743                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3744                         scale = 65;
3745                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3746                         scale = 6;
3747                 else
3748                         scale = 12;
3749
3750                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3751                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3752                 tw32(GRC_MISC_CFG, val);
3753         }
3754
3755         if (tp->link_config.active_speed == SPEED_1000 &&
3756             tp->link_config.active_duplex == DUPLEX_HALF)
3757                 tw32(MAC_TX_LENGTHS,
3758                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3759                       (6 << TX_LENGTHS_IPG_SHIFT) |
3760                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3761         else
3762                 tw32(MAC_TX_LENGTHS,
3763                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3764                       (6 << TX_LENGTHS_IPG_SHIFT) |
3765                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3766
3767         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3768                 if (netif_carrier_ok(tp->dev)) {
3769                         tw32(HOSTCC_STAT_COAL_TICKS,
3770                              tp->coal.stats_block_coalesce_usecs);
3771                 } else {
3772                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3773                 }
3774         }
3775
3776         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3777                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3778                 if (!netif_carrier_ok(tp->dev))
3779                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3780                               tp->pwrmgmt_thresh;
3781                 else
3782                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3783                 tw32(PCIE_PWR_MGMT_THRESH, val);
3784         }
3785
3786         return err;
3787 }
3788
3789 /* This is called whenever we suspect that the system chipset is re-
3790  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3791  * is bogus tx completions. We try to recover by setting the
3792  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3793  * in the workqueue.
3794  */
3795 static void tg3_tx_recover(struct tg3 *tp)
3796 {
3797         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3798                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3799
3800         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3801                "mapped I/O cycles to the network device, attempting to "
3802                "recover. Please report the problem to the driver maintainer "
3803                "and include system chipset information.\n", tp->dev->name);
3804
3805         spin_lock(&tp->lock);
3806         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3807         spin_unlock(&tp->lock);
3808 }
3809
3810 static inline u32 tg3_tx_avail(struct tg3 *tp)
3811 {
3812         smp_mb();
3813         return (tp->tx_pending -
3814                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3815 }
3816
3817 /* Tigon3 never reports partial packet sends.  So we do not
3818  * need special logic to handle SKBs that have not had all
3819  * of their frags sent yet, like SunGEM does.
3820  */
3821 static void tg3_tx(struct tg3 *tp)
3822 {
3823         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3824         u32 sw_idx = tp->tx_cons;
3825
3826         while (sw_idx != hw_idx) {
3827                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3828                 struct sk_buff *skb = ri->skb;
3829                 int i, tx_bug = 0;
3830
3831                 if (unlikely(skb == NULL)) {
3832                         tg3_tx_recover(tp);
3833                         return;
3834                 }
3835
3836                 pci_unmap_single(tp->pdev,
3837                                  pci_unmap_addr(ri, mapping),
3838                                  skb_headlen(skb),
3839                                  PCI_DMA_TODEVICE);
3840
3841                 ri->skb = NULL;
3842
3843                 sw_idx = NEXT_TX(sw_idx);
3844
3845                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3846                         ri = &tp->tx_buffers[sw_idx];
3847                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3848                                 tx_bug = 1;
3849
3850                         pci_unmap_page(tp->pdev,
3851                                        pci_unmap_addr(ri, mapping),
3852                                        skb_shinfo(skb)->frags[i].size,
3853                                        PCI_DMA_TODEVICE);
3854
3855                         sw_idx = NEXT_TX(sw_idx);
3856                 }
3857
3858                 dev_kfree_skb(skb);
3859
3860                 if (unlikely(tx_bug)) {
3861                         tg3_tx_recover(tp);
3862                         return;
3863                 }
3864         }
3865
3866         tp->tx_cons = sw_idx;
3867
3868         /* Need to make the tx_cons update visible to tg3_start_xmit()
3869          * before checking for netif_queue_stopped().  Without the
3870          * memory barrier, there is a small possibility that tg3_start_xmit()
3871          * will miss it and cause the queue to be stopped forever.
3872          */
3873         smp_mb();
3874
3875         if (unlikely(netif_queue_stopped(tp->dev) &&
3876                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3877                 netif_tx_lock(tp->dev);
3878                 if (netif_queue_stopped(tp->dev) &&
3879                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3880                         netif_wake_queue(tp->dev);
3881                 netif_tx_unlock(tp->dev);
3882         }
3883 }
3884
3885 /* Returns size of skb allocated or < 0 on error.
3886  *
3887  * We only need to fill in the address because the other members
3888  * of the RX descriptor are invariant, see tg3_init_rings.
3889  *
3890  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3891  * posting buffers we only dirty the first cache line of the RX
3892  * descriptor (containing the address).  Whereas for the RX status
3893  * buffers the cpu only reads the last cacheline of the RX descriptor
3894  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3895  */
3896 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3897                             int src_idx, u32 dest_idx_unmasked)
3898 {
3899         struct tg3_rx_buffer_desc *desc;
3900         struct ring_info *map, *src_map;
3901         struct sk_buff *skb;
3902         dma_addr_t mapping;
3903         int skb_size, dest_idx;
3904
3905         src_map = NULL;
3906         switch (opaque_key) {
3907         case RXD_OPAQUE_RING_STD:
3908                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3909                 desc = &tp->rx_std[dest_idx];
3910                 map = &tp->rx_std_buffers[dest_idx];
3911                 if (src_idx >= 0)
3912                         src_map = &tp->rx_std_buffers[src_idx];
3913                 skb_size = tp->rx_pkt_buf_sz;
3914                 break;
3915
3916         case RXD_OPAQUE_RING_JUMBO:
3917                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3918                 desc = &tp->rx_jumbo[dest_idx];
3919                 map = &tp->rx_jumbo_buffers[dest_idx];
3920                 if (src_idx >= 0)
3921                         src_map = &tp->rx_jumbo_buffers[src_idx];
3922                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3923                 break;
3924
3925         default:
3926                 return -EINVAL;
3927         }
3928
3929         /* Do not overwrite any of the map or rp information
3930          * until we are sure we can commit to a new buffer.
3931          *
3932          * Callers depend upon this behavior and assume that
3933          * we leave everything unchanged if we fail.
3934          */
3935         skb = netdev_alloc_skb(tp->dev, skb_size);
3936         if (skb == NULL)
3937                 return -ENOMEM;
3938
3939         skb_reserve(skb, tp->rx_offset);
3940
3941         mapping = pci_map_single(tp->pdev, skb->data,
3942                                  skb_size - tp->rx_offset,
3943                                  PCI_DMA_FROMDEVICE);
3944
3945         map->skb = skb;
3946         pci_unmap_addr_set(map, mapping, mapping);
3947
3948         if (src_map != NULL)
3949                 src_map->skb = NULL;
3950
3951         desc->addr_hi = ((u64)mapping >> 32);
3952         desc->addr_lo = ((u64)mapping & 0xffffffff);
3953
3954         return skb_size;
3955 }
3956
3957 /* We only need to move over in the address because the other
3958  * members of the RX descriptor are invariant.  See notes above
3959  * tg3_alloc_rx_skb for full details.
3960  */
3961 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3962                            int src_idx, u32 dest_idx_unmasked)
3963 {
3964         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3965         struct ring_info *src_map, *dest_map;
3966         int dest_idx;
3967
3968         switch (opaque_key) {
3969         case RXD_OPAQUE_RING_STD:
3970                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3971                 dest_desc = &tp->rx_std[dest_idx];
3972                 dest_map = &tp->rx_std_buffers[dest_idx];
3973                 src_desc = &tp->rx_std[src_idx];
3974                 src_map = &tp->rx_std_buffers[src_idx];
3975                 break;
3976
3977         case RXD_OPAQUE_RING_JUMBO:
3978                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3979                 dest_desc = &tp->rx_jumbo[dest_idx];
3980                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3981                 src_desc = &tp->rx_jumbo[src_idx];
3982                 src_map = &tp->rx_jumbo_buffers[src_idx];
3983                 break;
3984
3985         default:
3986                 return;
3987         }
3988
3989         dest_map->skb = src_map->skb;
3990         pci_unmap_addr_set(dest_map, mapping,
3991                            pci_unmap_addr(src_map, mapping));
3992         dest_desc->addr_hi = src_desc->addr_hi;
3993         dest_desc->addr_lo = src_desc->addr_lo;
3994
3995         src_map->skb = NULL;
3996 }
3997
3998 #if TG3_VLAN_TAG_USED
3999 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4000 {
4001         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4002 }
4003 #endif
4004
4005 /* The RX ring scheme is composed of multiple rings which post fresh
4006  * buffers to the chip, and one special ring the chip uses to report
4007  * status back to the host.
4008  *
4009  * The special ring reports the status of received packets to the
4010  * host.  The chip does not write into the original descriptor the
4011  * RX buffer was obtained from.  The chip simply takes the original
4012  * descriptor as provided by the host, updates the status and length
4013  * field, then writes this into the next status ring entry.
4014  *
4015  * Each ring the host uses to post buffers to the chip is described
4016  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4017  * it is first placed into the on-chip ram.  When the packet's length
4018  * is known, it walks down the TG3_BDINFO entries to select the ring.
4019  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4020  * which is within the range of the new packet's length is chosen.
4021  *
4022  * The "separate ring for rx status" scheme may sound queer, but it makes
4023  * sense from a cache coherency perspective.  If only the host writes
4024  * to the buffer post rings, and only the chip writes to the rx status
4025  * rings, then cache lines never move beyond shared-modified state.
4026  * If both the host and chip were to write into the same ring, cache line
4027  * eviction could occur since both entities want it in an exclusive state.
4028  */
4029 static int tg3_rx(struct tg3 *tp, int budget)
4030 {
4031         u32 work_mask, rx_std_posted = 0;
4032         u32 sw_idx = tp->rx_rcb_ptr;
4033         u16 hw_idx;
4034         int received;
4035
4036         hw_idx = tp->hw_status->idx[0].rx_producer;
4037         /*
4038          * We need to order the read of hw_idx and the read of
4039          * the opaque cookie.
4040          */
4041         rmb();
4042         work_mask = 0;
4043         received = 0;
4044         while (sw_idx != hw_idx && budget > 0) {
4045                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4046                 unsigned int len;
4047                 struct sk_buff *skb;
4048                 dma_addr_t dma_addr;
4049                 u32 opaque_key, desc_idx, *post_ptr;
4050
4051                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4052                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4053                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4054                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4055                                                   mapping);
4056                         skb = tp->rx_std_buffers[desc_idx].skb;
4057                         post_ptr = &tp->rx_std_ptr;
4058                         rx_std_posted++;
4059                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4060                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4061                                                   mapping);
4062                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
4063                         post_ptr = &tp->rx_jumbo_ptr;
4064                 }
4065                 else {
4066                         goto next_pkt_nopost;
4067                 }
4068
4069                 work_mask |= opaque_key;
4070
4071                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4072                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4073                 drop_it:
4074                         tg3_recycle_rx(tp, opaque_key,
4075                                        desc_idx, *post_ptr);
4076                 drop_it_no_recycle:
4077                         /* Other statistics kept track of by card. */
4078                         tp->net_stats.rx_dropped++;
4079                         goto next_pkt;
4080                 }
4081
4082                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4083
4084                 if (len > RX_COPY_THRESHOLD
4085                         && tp->rx_offset == 2
4086                         /* rx_offset != 2 iff this is a 5701 card running
4087                          * in PCI-X mode [see tg3_get_invariants()] */
4088                 ) {
4089                         int skb_size;
4090
4091                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4092                                                     desc_idx, *post_ptr);
4093                         if (skb_size < 0)
4094                                 goto drop_it;
4095
4096                         pci_unmap_single(tp->pdev, dma_addr,
4097                                          skb_size - tp->rx_offset,
4098                                          PCI_DMA_FROMDEVICE);
4099
4100                         skb_put(skb, len);
4101                 } else {
4102                         struct sk_buff *copy_skb;
4103
4104                         tg3_recycle_rx(tp, opaque_key,
4105                                        desc_idx, *post_ptr);
4106
4107                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4108                         if (copy_skb == NULL)
4109                                 goto drop_it_no_recycle;
4110
4111                         skb_reserve(copy_skb, 2);
4112                         skb_put(copy_skb, len);
4113                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4114                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4115                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4116
4117                         /* We'll reuse the original ring buffer. */
4118                         skb = copy_skb;
4119                 }
4120
4121                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4122                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4123                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4124                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4125                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4126                 else
4127                         skb->ip_summed = CHECKSUM_NONE;
4128
4129                 skb->protocol = eth_type_trans(skb, tp->dev);
4130 #if TG3_VLAN_TAG_USED
4131                 if (tp->vlgrp != NULL &&
4132                     desc->type_flags & RXD_FLAG_VLAN) {
4133                         tg3_vlan_rx(tp, skb,
4134                                     desc->err_vlan & RXD_VLAN_MASK);
4135                 } else
4136 #endif
4137                         netif_receive_skb(skb);
4138
4139                 tp->dev->last_rx = jiffies;
4140                 received++;
4141                 budget--;
4142
4143 next_pkt:
4144                 (*post_ptr)++;
4145
4146                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4147                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4148
4149                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4150                                      TG3_64BIT_REG_LOW, idx);
4151                         work_mask &= ~RXD_OPAQUE_RING_STD;
4152                         rx_std_posted = 0;
4153                 }
4154 next_pkt_nopost:
4155                 sw_idx++;
4156                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4157
4158                 /* Refresh hw_idx to see if there is new work */
4159                 if (sw_idx == hw_idx) {
4160                         hw_idx = tp->hw_status->idx[0].rx_producer;
4161                         rmb();
4162                 }
4163         }
4164
4165         /* ACK the status ring. */
4166         tp->rx_rcb_ptr = sw_idx;
4167         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4168
4169         /* Refill RX ring(s). */
4170         if (work_mask & RXD_OPAQUE_RING_STD) {
4171                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4172                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4173                              sw_idx);
4174         }
4175         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4176                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4177                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4178                              sw_idx);
4179         }
4180         mmiowb();
4181
4182         return received;
4183 }
4184
4185 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4186 {
4187         struct tg3_hw_status *sblk = tp->hw_status;
4188
4189         /* handle link change and other phy events */
4190         if (!(tp->tg3_flags &
4191               (TG3_FLAG_USE_LINKCHG_REG |
4192                TG3_FLAG_POLL_SERDES))) {
4193                 if (sblk->status & SD_STATUS_LINK_CHG) {
4194                         sblk->status = SD_STATUS_UPDATED |
4195                                 (sblk->status & ~SD_STATUS_LINK_CHG);
4196                         spin_lock(&tp->lock);
4197                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4198                                 tw32_f(MAC_STATUS,
4199                                      (MAC_STATUS_SYNC_CHANGED |
4200                                       MAC_STATUS_CFG_CHANGED |
4201                                       MAC_STATUS_MI_COMPLETION |
4202                                       MAC_STATUS_LNKSTATE_CHANGED));
4203                                 udelay(40);
4204                         } else
4205                                 tg3_setup_phy(tp, 0);
4206                         spin_unlock(&tp->lock);
4207                 }
4208         }
4209
4210         /* run TX completion thread */
4211         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4212                 tg3_tx(tp);
4213                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4214                         return work_done;
4215         }
4216
4217         /* run RX thread, within the bounds set by NAPI.
4218          * All RX "locking" is done by ensuring outside
4219          * code synchronizes with tg3->napi.poll()
4220          */
4221         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4222                 work_done += tg3_rx(tp, budget - work_done);
4223
4224         return work_done;
4225 }
4226
4227 static int tg3_poll(struct napi_struct *napi, int budget)
4228 {
4229         struct tg3 *tp = container_of(napi, struct tg3, napi);
4230         int work_done = 0;
4231         struct tg3_hw_status *sblk = tp->hw_status;
4232
4233         while (1) {
4234                 work_done = tg3_poll_work(tp, work_done, budget);
4235
4236                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4237                         goto tx_recovery;
4238
4239                 if (unlikely(work_done >= budget))
4240                         break;
4241
4242                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4243                         /* tp->last_tag is used in tg3_restart_ints() below
4244                          * to tell the hw how much work has been processed,
4245                          * so we must read it before checking for more work.
4246                          */
4247                         tp->last_tag = sblk->status_tag;
4248                         rmb();
4249                 } else
4250                         sblk->status &= ~SD_STATUS_UPDATED;
4251
4252                 if (likely(!tg3_has_work(tp))) {
4253                         netif_rx_complete(tp->dev, napi);
4254                         tg3_restart_ints(tp);
4255                         break;
4256                 }
4257         }
4258
4259         return work_done;
4260
4261 tx_recovery:
4262         /* work_done is guaranteed to be less than budget. */
4263         netif_rx_complete(tp->dev, napi);
4264         schedule_work(&tp->reset_task);
4265         return work_done;
4266 }
4267
4268 static void tg3_irq_quiesce(struct tg3 *tp)
4269 {
4270         BUG_ON(tp->irq_sync);
4271
4272         tp->irq_sync = 1;
4273         smp_mb();
4274
4275         synchronize_irq(tp->pdev->irq);
4276 }
4277
4278 static inline int tg3_irq_sync(struct tg3 *tp)
4279 {
4280         return tp->irq_sync;
4281 }
4282
4283 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4284  * If irq_sync is non-zero, then the IRQ handler must be synchronized
4285  * with as well.  Most of the time, this is not necessary except when
4286  * shutting down the device.
4287  */
4288 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4289 {
4290         spin_lock_bh(&tp->lock);
4291         if (irq_sync)
4292                 tg3_irq_quiesce(tp);
4293 }
4294
4295 static inline void tg3_full_unlock(struct tg3 *tp)
4296 {
4297         spin_unlock_bh(&tp->lock);
4298 }
4299
4300 /* One-shot MSI handler - Chip automatically disables interrupt
4301  * after sending MSI so driver doesn't have to do it.
4302  */
4303 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4304 {
4305         struct net_device *dev = dev_id;
4306         struct tg3 *tp = netdev_priv(dev);
4307
4308         prefetch(tp->hw_status);
4309         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4310
4311         if (likely(!tg3_irq_sync(tp)))
4312                 netif_rx_schedule(dev, &tp->napi);
4313
4314         return IRQ_HANDLED;
4315 }
4316
4317 /* MSI ISR - No need to check for interrupt sharing and no need to
4318  * flush status block and interrupt mailbox. PCI ordering rules
4319  * guarantee that MSI will arrive after the status block.
4320  */
4321 static irqreturn_t tg3_msi(int irq, void *dev_id)
4322 {
4323         struct net_device *dev = dev_id;
4324         struct tg3 *tp = netdev_priv(dev);
4325
4326         prefetch(tp->hw_status);
4327         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4328         /*
4329          * Writing any value to intr-mbox-0 clears PCI INTA# and
4330          * chip-internal interrupt pending events.
4331          * Writing non-zero to intr-mbox-0 additional tells the
4332          * NIC to stop sending us irqs, engaging "in-intr-handler"
4333          * event coalescing.
4334          */
4335         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4336         if (likely(!tg3_irq_sync(tp)))
4337                 netif_rx_schedule(dev, &tp->napi);
4338
4339         return IRQ_RETVAL(1);
4340 }
4341
4342 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4343 {
4344         struct net_device *dev = dev_id;
4345         struct tg3 *tp = netdev_priv(dev);
4346         struct tg3_hw_status *sblk = tp->hw_status;
4347         unsigned int handled = 1;
4348
4349         /* In INTx mode, it is possible for the interrupt to arrive at
4350          * the CPU before the status block posted prior to the interrupt.
4351          * Reading the PCI State register will confirm whether the
4352          * interrupt is ours and will flush the status block.
4353          */
4354         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4355                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4356                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4357                         handled = 0;
4358                         goto out;
4359                 }
4360         }
4361
4362         /*
4363          * Writing any value to intr-mbox-0 clears PCI INTA# and
4364          * chip-internal interrupt pending events.
4365          * Writing non-zero to intr-mbox-0 additional tells the
4366          * NIC to stop sending us irqs, engaging "in-intr-handler"
4367          * event coalescing.
4368          *
4369          * Flush the mailbox to de-assert the IRQ immediately to prevent
4370          * spurious interrupts.  The flush impacts performance but
4371          * excessive spurious interrupts can be worse in some cases.
4372          */
4373         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4374         if (tg3_irq_sync(tp))
4375                 goto out;
4376         sblk->status &= ~SD_STATUS_UPDATED;
4377         if (likely(tg3_has_work(tp))) {
4378                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4379                 netif_rx_schedule(dev, &tp->napi);
4380         } else {
4381                 /* No work, shared interrupt perhaps?  re-enable
4382                  * interrupts, and flush that PCI write
4383                  */
4384                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4385                                0x00000000);
4386         }
4387 out:
4388         return IRQ_RETVAL(handled);
4389 }
4390
4391 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4392 {
4393         struct net_device *dev = dev_id;
4394         struct tg3 *tp = netdev_priv(dev);
4395         struct tg3_hw_status *sblk = tp->hw_status;
4396         unsigned int handled = 1;
4397
4398         /* In INTx mode, it is possible for the interrupt to arrive at
4399          * the CPU before the status block posted prior to the interrupt.
4400          * Reading the PCI State register will confirm whether the
4401          * interrupt is ours and will flush the status block.
4402          */
4403         if (unlikely(sblk->status_tag == tp->last_tag)) {
4404                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4405                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4406                         handled = 0;
4407                         goto out;
4408                 }
4409         }
4410
4411         /*
4412          * writing any value to intr-mbox-0 clears PCI INTA# and
4413          * chip-internal interrupt pending events.
4414          * writing non-zero to intr-mbox-0 additional tells the
4415          * NIC to stop sending us irqs, engaging "in-intr-handler"
4416          * event coalescing.
4417          *
4418          * Flush the mailbox to de-assert the IRQ immediately to prevent
4419          * spurious interrupts.  The flush impacts performance but
4420          * excessive spurious interrupts can be worse in some cases.
4421          */
4422         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4423         if (tg3_irq_sync(tp))
4424                 goto out;
4425         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4426                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4427                 /* Update last_tag to mark that this status has been
4428                  * seen. Because interrupt may be shared, we may be
4429                  * racing with tg3_poll(), so only update last_tag
4430                  * if tg3_poll() is not scheduled.
4431                  */
4432                 tp->last_tag = sblk->status_tag;
4433                 __netif_rx_schedule(dev, &tp->napi);
4434         }
4435 out:
4436         return IRQ_RETVAL(handled);
4437 }
4438
4439 /* ISR for interrupt test */
4440 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4441 {
4442         struct net_device *dev = dev_id;
4443         struct tg3 *tp = netdev_priv(dev);
4444         struct tg3_hw_status *sblk = tp->hw_status;
4445
4446         if ((sblk->status & SD_STATUS_UPDATED) ||
4447             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4448                 tg3_disable_ints(tp);
4449                 return IRQ_RETVAL(1);
4450         }
4451         return IRQ_RETVAL(0);
4452 }
4453
4454 static int tg3_init_hw(struct tg3 *, int);
4455 static int tg3_halt(struct tg3 *, int, int);
4456
4457 /* Restart hardware after configuration changes, self-test, etc.
4458  * Invoked with tp->lock held.
4459  */
4460 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4461         __releases(tp->lock)
4462         __acquires(tp->lock)
4463 {
4464         int err;
4465
4466         err = tg3_init_hw(tp, reset_phy);
4467         if (err) {
4468                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4469                        "aborting.\n", tp->dev->name);
4470                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4471                 tg3_full_unlock(tp);
4472                 del_timer_sync(&tp->timer);
4473                 tp->irq_sync = 0;
4474                 napi_enable(&tp->napi);
4475                 dev_close(tp->dev);
4476                 tg3_full_lock(tp, 0);
4477         }
4478         return err;
4479 }
4480
4481 #ifdef CONFIG_NET_POLL_CONTROLLER
4482 static void tg3_poll_controller(struct net_device *dev)
4483 {
4484         struct tg3 *tp = netdev_priv(dev);
4485
4486         tg3_interrupt(tp->pdev->irq, dev);
4487 }
4488 #endif
4489
4490 static void tg3_reset_task(struct work_struct *work)
4491 {
4492         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4493         int err;
4494         unsigned int restart_timer;
4495
4496         tg3_full_lock(tp, 0);
4497
4498         if (!netif_running(tp->dev)) {
4499                 tg3_full_unlock(tp);
4500                 return;
4501         }
4502
4503         tg3_full_unlock(tp);
4504
4505         tg3_phy_stop(tp);
4506
4507         tg3_netif_stop(tp);
4508
4509         tg3_full_lock(tp, 1);
4510
4511         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4512         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4513
4514         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4515                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4516                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4517                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4518                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4519         }
4520
4521         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4522         err = tg3_init_hw(tp, 1);
4523         if (err)
4524                 goto out;
4525
4526         tg3_netif_start(tp);
4527
4528         if (restart_timer)
4529                 mod_timer(&tp->timer, jiffies + 1);
4530
4531 out:
4532         tg3_full_unlock(tp);
4533
4534         if (!err)
4535                 tg3_phy_start(tp);
4536 }
4537
4538 static void tg3_dump_short_state(struct tg3 *tp)
4539 {
4540         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4541                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4542         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4543                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4544 }
4545
4546 static void tg3_tx_timeout(struct net_device *dev)
4547 {
4548         struct tg3 *tp = netdev_priv(dev);
4549
4550         if (netif_msg_tx_err(tp)) {
4551                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4552                        dev->name);
4553                 tg3_dump_short_state(tp);
4554         }
4555
4556         schedule_work(&tp->reset_task);
4557 }
4558
4559 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4560 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4561 {
4562         u32 base = (u32) mapping & 0xffffffff;
4563
4564         return ((base > 0xffffdcc0) &&
4565                 (base + len + 8 < base));
4566 }
4567
4568 /* Test for DMA addresses > 40-bit */
4569 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4570                                           int len)
4571 {
4572 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4573         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4574                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4575         return 0;
4576 #else
4577         return 0;
4578 #endif
4579 }
4580
4581 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4582
4583 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4584 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4585                                        u32 last_plus_one, u32 *start,
4586                                        u32 base_flags, u32 mss)
4587 {
4588         struct sk_buff *new_skb;
4589         dma_addr_t new_addr = 0;
4590         u32 entry = *start;
4591         int i, ret = 0;
4592
4593         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4594                 new_skb = skb_copy(skb, GFP_ATOMIC);
4595         else {
4596                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4597
4598                 new_skb = skb_copy_expand(skb,
4599                                           skb_headroom(skb) + more_headroom,
4600                                           skb_tailroom(skb), GFP_ATOMIC);
4601         }
4602
4603         if (!new_skb) {
4604                 ret = -1;
4605         } else {
4606                 /* New SKB is guaranteed to be linear. */
4607                 entry = *start;
4608                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4609                                           PCI_DMA_TODEVICE);
4610                 /* Make sure new skb does not cross any 4G boundaries.
4611                  * Drop the packet if it does.
4612                  */
4613                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4614                         ret = -1;
4615                         dev_kfree_skb(new_skb);
4616                         new_skb = NULL;
4617                 } else {
4618                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4619                                     base_flags, 1 | (mss << 1));
4620                         *start = NEXT_TX(entry);
4621                 }
4622         }
4623
4624         /* Now clean up the sw ring entries. */
4625         i = 0;
4626         while (entry != last_plus_one) {
4627                 int len;
4628
4629                 if (i == 0)
4630                         len = skb_headlen(skb);
4631                 else
4632                         len = skb_shinfo(skb)->frags[i-1].size;
4633                 pci_unmap_single(tp->pdev,
4634                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4635                                  len, PCI_DMA_TODEVICE);
4636                 if (i == 0) {
4637                         tp->tx_buffers[entry].skb = new_skb;
4638                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4639                 } else {
4640                         tp->tx_buffers[entry].skb = NULL;
4641                 }
4642                 entry = NEXT_TX(entry);
4643                 i++;
4644         }
4645
4646         dev_kfree_skb(skb);
4647
4648         return ret;
4649 }
4650
4651 static void tg3_set_txd(struct tg3 *tp, int entry,
4652                         dma_addr_t mapping, int len, u32 flags,
4653                         u32 mss_and_is_end)
4654 {
4655         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4656         int is_end = (mss_and_is_end & 0x1);
4657         u32 mss = (mss_and_is_end >> 1);
4658         u32 vlan_tag = 0;
4659
4660         if (is_end)
4661                 flags |= TXD_FLAG_END;
4662         if (flags & TXD_FLAG_VLAN) {
4663                 vlan_tag = flags >> 16;
4664                 flags &= 0xffff;
4665         }
4666         vlan_tag |= (mss << TXD_MSS_SHIFT);
4667
4668         txd->addr_hi = ((u64) mapping >> 32);
4669         txd->addr_lo = ((u64) mapping & 0xffffffff);
4670         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4671         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4672 }
4673
4674 /* hard_start_xmit for devices that don't have any bugs and
4675  * support TG3_FLG2_HW_TSO_2 only.
4676  */
4677 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4678 {
4679         struct tg3 *tp = netdev_priv(dev);
4680         dma_addr_t mapping;
4681         u32 len, entry, base_flags, mss;
4682
4683         len = skb_headlen(skb);
4684
4685         /* We are running in BH disabled context with netif_tx_lock
4686          * and TX reclaim runs via tp->napi.poll inside of a software
4687          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4688          * no IRQ context deadlocks to worry about either.  Rejoice!
4689          */
4690         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4691                 if (!netif_queue_stopped(dev)) {
4692                         netif_stop_queue(dev);
4693
4694                         /* This is a hard error, log it. */
4695                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4696                                "queue awake!\n", dev->name);
4697                 }
4698                 return NETDEV_TX_BUSY;
4699         }
4700
4701         entry = tp->tx_prod;
4702         base_flags = 0;
4703         mss = 0;
4704         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4705                 int tcp_opt_len, ip_tcp_len;
4706
4707                 if (skb_header_cloned(skb) &&
4708                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4709                         dev_kfree_skb(skb);
4710                         goto out_unlock;
4711                 }
4712
4713                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4714                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4715                 else {
4716                         struct iphdr *iph = ip_hdr(skb);
4717
4718                         tcp_opt_len = tcp_optlen(skb);
4719                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4720
4721                         iph->check = 0;
4722                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4723                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4724                 }
4725
4726                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4727                                TXD_FLAG_CPU_POST_DMA);
4728
4729                 tcp_hdr(skb)->check = 0;
4730
4731         }
4732         else if (skb->ip_summed == CHECKSUM_PARTIAL)
4733                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4734 #if TG3_VLAN_TAG_USED
4735         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4736                 base_flags |= (TXD_FLAG_VLAN |
4737                                (vlan_tx_tag_get(skb) << 16));
4738 #endif
4739
4740         /* Queue skb data, a.k.a. the main skb fragment. */
4741         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4742
4743         tp->tx_buffers[entry].skb = skb;
4744         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4745
4746         tg3_set_txd(tp, entry, mapping, len, base_flags,
4747                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4748
4749         entry = NEXT_TX(entry);
4750
4751         /* Now loop through additional data fragments, and queue them. */
4752         if (skb_shinfo(skb)->nr_frags > 0) {
4753                 unsigned int i, last;
4754
4755                 last = skb_shinfo(skb)->nr_frags - 1;
4756                 for (i = 0; i <= last; i++) {
4757                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4758
4759                         len = frag->size;
4760                         mapping = pci_map_page(tp->pdev,
4761                                                frag->page,
4762                                                frag->page_offset,
4763                                                len, PCI_DMA_TODEVICE);
4764
4765                         tp->tx_buffers[entry].skb = NULL;
4766                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4767
4768                         tg3_set_txd(tp, entry, mapping, len,
4769                                     base_flags, (i == last) | (mss << 1));
4770
4771                         entry = NEXT_TX(entry);
4772                 }
4773         }
4774
4775         /* Packets are ready, update Tx producer idx local and on card. */
4776         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4777
4778         tp->tx_prod = entry;
4779         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4780                 netif_stop_queue(dev);
4781                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4782                         netif_wake_queue(tp->dev);
4783         }
4784
4785 out_unlock:
4786         mmiowb();
4787
4788         dev->trans_start = jiffies;
4789
4790         return NETDEV_TX_OK;
4791 }
4792
4793 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4794
4795 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4796  * TSO header is greater than 80 bytes.
4797  */
4798 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4799 {
4800         struct sk_buff *segs, *nskb;
4801
4802         /* Estimate the number of fragments in the worst case */
4803         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4804                 netif_stop_queue(tp->dev);
4805                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4806                         return NETDEV_TX_BUSY;
4807
4808                 netif_wake_queue(tp->dev);
4809         }
4810
4811         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4812         if (IS_ERR(segs))
4813                 goto tg3_tso_bug_end;
4814
4815         do {
4816                 nskb = segs;
4817                 segs = segs->next;
4818                 nskb->next = NULL;
4819                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4820         } while (segs);
4821
4822 tg3_tso_bug_end:
4823         dev_kfree_skb(skb);
4824
4825         return NETDEV_TX_OK;
4826 }
4827
4828 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4829  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4830  */
4831 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4832 {
4833         struct tg3 *tp = netdev_priv(dev);
4834         dma_addr_t mapping;
4835         u32 len, entry, base_flags, mss;
4836         int would_hit_hwbug;
4837
4838         len = skb_headlen(skb);
4839
4840         /* We are running in BH disabled context with netif_tx_lock
4841          * and TX reclaim runs via tp->napi.poll inside of a software
4842          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4843          * no IRQ context deadlocks to worry about either.  Rejoice!
4844          */
4845         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4846                 if (!netif_queue_stopped(dev)) {
4847                         netif_stop_queue(dev);
4848
4849                         /* This is a hard error, log it. */
4850                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4851                                "queue awake!\n", dev->name);
4852                 }
4853                 return NETDEV_TX_BUSY;
4854         }
4855
4856         entry = tp->tx_prod;
4857         base_flags = 0;
4858         if (skb->ip_summed == CHECKSUM_PARTIAL)
4859                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4860         mss = 0;
4861         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4862                 struct iphdr *iph;
4863                 int tcp_opt_len, ip_tcp_len, hdr_len;
4864
4865                 if (skb_header_cloned(skb) &&
4866                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4867                         dev_kfree_skb(skb);
4868                         goto out_unlock;
4869                 }
4870
4871                 tcp_opt_len = tcp_optlen(skb);
4872                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4873
4874                 hdr_len = ip_tcp_len + tcp_opt_len;
4875                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4876                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4877                         return (tg3_tso_bug(tp, skb));
4878
4879                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4880                                TXD_FLAG_CPU_POST_DMA);
4881
4882                 iph = ip_hdr(skb);
4883                 iph->check = 0;
4884                 iph->tot_len = htons(mss + hdr_len);
4885                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4886                         tcp_hdr(skb)->check = 0;
4887                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4888                 } else
4889                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4890                                                                  iph->daddr, 0,
4891                                                                  IPPROTO_TCP,
4892                                                                  0);
4893
4894                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4895                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4896                         if (tcp_opt_len || iph->ihl > 5) {
4897                                 int tsflags;
4898
4899                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4900                                 mss |= (tsflags << 11);
4901                         }
4902                 } else {
4903                         if (tcp_opt_len || iph->ihl > 5) {
4904                                 int tsflags;
4905
4906                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4907                                 base_flags |= tsflags << 12;
4908                         }
4909                 }
4910         }
4911 #if TG3_VLAN_TAG_USED
4912         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4913                 base_flags |= (TXD_FLAG_VLAN |
4914                                (vlan_tx_tag_get(skb) << 16));
4915 #endif
4916
4917         /* Queue skb data, a.k.a. the main skb fragment. */
4918         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4919
4920         tp->tx_buffers[entry].skb = skb;
4921         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4922
4923         would_hit_hwbug = 0;
4924
4925         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4926                 would_hit_hwbug = 1;
4927         else if (tg3_4g_overflow_test(mapping, len))
4928                 would_hit_hwbug = 1;
4929
4930         tg3_set_txd(tp, entry, mapping, len, base_flags,
4931                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4932
4933         entry = NEXT_TX(entry);
4934
4935         /* Now loop through additional data fragments, and queue them. */
4936         if (skb_shinfo(skb)->nr_frags > 0) {
4937                 unsigned int i, last;
4938
4939                 last = skb_shinfo(skb)->nr_frags - 1;
4940                 for (i = 0; i <= last; i++) {
4941                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4942
4943                         len = frag->size;
4944                         mapping = pci_map_page(tp->pdev,
4945                                                frag->page,
4946                                                frag->page_offset,
4947                                                len, PCI_DMA_TODEVICE);
4948
4949                         tp->tx_buffers[entry].skb = NULL;
4950                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4951
4952                         if (tg3_4g_overflow_test(mapping, len))
4953                                 would_hit_hwbug = 1;
4954
4955                         if (tg3_40bit_overflow_test(tp, mapping, len))
4956                                 would_hit_hwbug = 1;
4957
4958                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4959                                 tg3_set_txd(tp, entry, mapping, len,
4960                                             base_flags, (i == last)|(mss << 1));
4961                         else
4962                                 tg3_set_txd(tp, entry, mapping, len,
4963                                             base_flags, (i == last));
4964
4965                         entry = NEXT_TX(entry);
4966                 }
4967         }
4968
4969         if (would_hit_hwbug) {
4970                 u32 last_plus_one = entry;
4971                 u32 start;
4972
4973                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4974                 start &= (TG3_TX_RING_SIZE - 1);
4975
4976                 /* If the workaround fails due to memory/mapping
4977                  * failure, silently drop this packet.
4978                  */
4979                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4980                                                 &start, base_flags, mss))
4981                         goto out_unlock;
4982
4983                 entry = start;
4984         }
4985
4986         /* Packets are ready, update Tx producer idx local and on card. */
4987         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4988
4989         tp->tx_prod = entry;
4990         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4991                 netif_stop_queue(dev);
4992                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4993                         netif_wake_queue(tp->dev);
4994         }
4995
4996 out_unlock:
4997         mmiowb();
4998
4999         dev->trans_start = jiffies;
5000
5001         return NETDEV_TX_OK;
5002 }
5003
5004 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5005                                int new_mtu)
5006 {
5007         dev->mtu = new_mtu;
5008
5009         if (new_mtu > ETH_DATA_LEN) {
5010                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5011                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5012                         ethtool_op_set_tso(dev, 0);
5013                 }
5014                 else
5015                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5016         } else {
5017                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5018                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5019                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5020         }
5021 }
5022
5023 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5024 {
5025         struct tg3 *tp = netdev_priv(dev);
5026         int err;
5027
5028         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5029                 return -EINVAL;
5030
5031         if (!netif_running(dev)) {
5032                 /* We'll just catch it later when the
5033                  * device is up'd.
5034                  */
5035                 tg3_set_mtu(dev, tp, new_mtu);
5036                 return 0;
5037         }
5038
5039         tg3_phy_stop(tp);
5040
5041         tg3_netif_stop(tp);
5042
5043         tg3_full_lock(tp, 1);
5044
5045         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5046
5047         tg3_set_mtu(dev, tp, new_mtu);
5048
5049         err = tg3_restart_hw(tp, 0);
5050
5051         if (!err)
5052                 tg3_netif_start(tp);
5053
5054         tg3_full_unlock(tp);
5055
5056         if (!err)
5057                 tg3_phy_start(tp);
5058
5059         return err;
5060 }
5061
5062 /* Free up pending packets in all rx/tx rings.
5063  *
5064  * The chip has been shut down and the driver detached from
5065  * the networking, so no interrupts or new tx packets will
5066  * end up in the driver.  tp->{tx,}lock is not held and we are not
5067  * in an interrupt context and thus may sleep.
5068  */
5069 static void tg3_free_rings(struct tg3 *tp)
5070 {
5071         struct ring_info *rxp;
5072         int i;
5073
5074         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5075                 rxp = &tp->rx_std_buffers[i];
5076
5077                 if (rxp->skb == NULL)
5078                         continue;
5079                 pci_unmap_single(tp->pdev,
5080                                  pci_unmap_addr(rxp, mapping),
5081                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5082                                  PCI_DMA_FROMDEVICE);
5083                 dev_kfree_skb_any(rxp->skb);
5084                 rxp->skb = NULL;
5085         }
5086
5087         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5088                 rxp = &tp->rx_jumbo_buffers[i];
5089
5090                 if (rxp->skb == NULL)
5091                         continue;
5092                 pci_unmap_single(tp->pdev,
5093                                  pci_unmap_addr(rxp, mapping),
5094                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5095                                  PCI_DMA_FROMDEVICE);
5096                 dev_kfree_skb_any(rxp->skb);
5097                 rxp->skb = NULL;
5098         }
5099
5100         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5101                 struct tx_ring_info *txp;
5102                 struct sk_buff *skb;
5103                 int j;
5104
5105                 txp = &tp->tx_buffers[i];
5106                 skb = txp->skb;
5107
5108                 if (skb == NULL) {
5109                         i++;
5110                         continue;
5111                 }
5112
5113                 pci_unmap_single(tp->pdev,
5114                                  pci_unmap_addr(txp, mapping),
5115                                  skb_headlen(skb),
5116                                  PCI_DMA_TODEVICE);
5117                 txp->skb = NULL;
5118
5119                 i++;
5120
5121                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
5122                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
5123                         pci_unmap_page(tp->pdev,
5124                                        pci_unmap_addr(txp, mapping),
5125                                        skb_shinfo(skb)->frags[j].size,
5126                                        PCI_DMA_TODEVICE);
5127                         i++;
5128                 }
5129
5130                 dev_kfree_skb_any(skb);
5131         }
5132 }
5133
5134 /* Initialize tx/rx rings for packet processing.
5135  *
5136  * The chip has been shut down and the driver detached from
5137  * the networking, so no interrupts or new tx packets will
5138  * end up in the driver.  tp->{tx,}lock are held and thus
5139  * we may not sleep.
5140  */
5141 static int tg3_init_rings(struct tg3 *tp)
5142 {
5143         u32 i;
5144
5145         /* Free up all the SKBs. */
5146         tg3_free_rings(tp);
5147
5148         /* Zero out all descriptors. */
5149         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5150         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5151         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5152         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5153
5154         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5155         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5156             (tp->dev->mtu > ETH_DATA_LEN))
5157                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5158
5159         /* Initialize invariants of the rings, we only set this
5160          * stuff once.  This works because the card does not
5161          * write into the rx buffer posting rings.
5162          */
5163         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5164                 struct tg3_rx_buffer_desc *rxd;
5165
5166                 rxd = &tp->rx_std[i];
5167                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5168                         << RXD_LEN_SHIFT;
5169                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5170                 rxd->opaque = (RXD_OPAQUE_RING_STD |
5171                                (i << RXD_OPAQUE_INDEX_SHIFT));
5172         }
5173
5174         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5175                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5176                         struct tg3_rx_buffer_desc *rxd;
5177
5178                         rxd = &tp->rx_jumbo[i];
5179                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5180                                 << RXD_LEN_SHIFT;
5181                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5182                                 RXD_FLAG_JUMBO;
5183                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5184                                (i << RXD_OPAQUE_INDEX_SHIFT));
5185                 }
5186         }
5187
5188         /* Now allocate fresh SKBs for each rx ring. */
5189         for (i = 0; i < tp->rx_pending; i++) {
5190                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5191                         printk(KERN_WARNING PFX
5192                                "%s: Using a smaller RX standard ring, "
5193                                "only %d out of %d buffers were allocated "
5194                                "successfully.\n",
5195                                tp->dev->name, i, tp->rx_pending);
5196                         if (i == 0)
5197                                 return -ENOMEM;
5198                         tp->rx_pending = i;
5199                         break;
5200                 }
5201         }
5202
5203         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5204                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5205                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5206                                              -1, i) < 0) {
5207                                 printk(KERN_WARNING PFX
5208                                        "%s: Using a smaller RX jumbo ring, "
5209                                        "only %d out of %d buffers were "
5210                                        "allocated successfully.\n",
5211                                        tp->dev->name, i, tp->rx_jumbo_pending);
5212                                 if (i == 0) {
5213                                         tg3_free_rings(tp);
5214                                         return -ENOMEM;
5215                                 }
5216                                 tp->rx_jumbo_pending = i;
5217                                 break;
5218                         }
5219                 }
5220         }
5221         return 0;
5222 }
5223
5224 /*
5225  * Must not be invoked with interrupt sources disabled and
5226  * the hardware shutdown down.
5227  */
5228 static void tg3_free_consistent(struct tg3 *tp)
5229 {
5230         kfree(tp->rx_std_buffers);
5231         tp->rx_std_buffers = NULL;
5232         if (tp->rx_std) {
5233                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5234                                     tp->rx_std, tp->rx_std_mapping);
5235                 tp->rx_std = NULL;
5236         }
5237         if (tp->rx_jumbo) {
5238                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5239                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5240                 tp->rx_jumbo = NULL;
5241         }
5242         if (tp->rx_rcb) {
5243                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5244                                     tp->rx_rcb, tp->rx_rcb_mapping);
5245                 tp->rx_rcb = NULL;
5246         }
5247         if (tp->tx_ring) {
5248                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5249                         tp->tx_ring, tp->tx_desc_mapping);
5250                 tp->tx_ring = NULL;
5251         }
5252         if (tp->hw_status) {
5253                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5254                                     tp->hw_status, tp->status_mapping);
5255                 tp->hw_status = NULL;
5256         }
5257         if (tp->hw_stats) {
5258                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5259                                     tp->hw_stats, tp->stats_mapping);
5260                 tp->hw_stats = NULL;
5261         }
5262 }
5263
5264 /*
5265  * Must not be invoked with interrupt sources disabled and
5266  * the hardware shutdown down.  Can sleep.
5267  */
5268 static int tg3_alloc_consistent(struct tg3 *tp)
5269 {
5270         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5271                                       (TG3_RX_RING_SIZE +
5272                                        TG3_RX_JUMBO_RING_SIZE)) +
5273                                      (sizeof(struct tx_ring_info) *
5274                                       TG3_TX_RING_SIZE),
5275                                      GFP_KERNEL);
5276         if (!tp->rx_std_buffers)
5277                 return -ENOMEM;
5278
5279         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5280         tp->tx_buffers = (struct tx_ring_info *)
5281                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5282
5283         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5284                                           &tp->rx_std_mapping);
5285         if (!tp->rx_std)
5286                 goto err_out;
5287
5288         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5289                                             &tp->rx_jumbo_mapping);
5290
5291         if (!tp->rx_jumbo)
5292                 goto err_out;
5293
5294         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5295                                           &tp->rx_rcb_mapping);
5296         if (!tp->rx_rcb)
5297                 goto err_out;
5298
5299         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5300                                            &tp->tx_desc_mapping);
5301         if (!tp->tx_ring)
5302                 goto err_out;
5303
5304         tp->hw_status = pci_alloc_consistent(tp->pdev,
5305                                              TG3_HW_STATUS_SIZE,
5306                                              &tp->status_mapping);
5307         if (!tp->hw_status)
5308                 goto err_out;
5309
5310         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5311                                             sizeof(struct tg3_hw_stats),
5312                                             &tp->stats_mapping);
5313         if (!tp->hw_stats)
5314                 goto err_out;
5315
5316         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5317         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5318
5319         return 0;
5320
5321 err_out:
5322         tg3_free_consistent(tp);
5323         return -ENOMEM;
5324 }
5325
5326 #define MAX_WAIT_CNT 1000
5327
5328 /* To stop a block, clear the enable bit and poll till it
5329  * clears.  tp->lock is held.
5330  */
5331 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5332 {
5333         unsigned int i;
5334         u32 val;
5335
5336         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5337                 switch (ofs) {
5338                 case RCVLSC_MODE:
5339                 case DMAC_MODE:
5340                 case MBFREE_MODE:
5341                 case BUFMGR_MODE:
5342                 case MEMARB_MODE:
5343                         /* We can't enable/disable these bits of the
5344                          * 5705/5750, just say success.
5345                          */
5346                         return 0;
5347
5348                 default:
5349                         break;
5350                 }
5351         }
5352
5353         val = tr32(ofs);
5354         val &= ~enable_bit;
5355         tw32_f(ofs, val);
5356
5357         for (i = 0; i < MAX_WAIT_CNT; i++) {
5358                 udelay(100);
5359                 val = tr32(ofs);
5360                 if ((val & enable_bit) == 0)
5361                         break;
5362         }
5363
5364         if (i == MAX_WAIT_CNT && !silent) {
5365                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5366                        "ofs=%lx enable_bit=%x\n",
5367                        ofs, enable_bit);
5368                 return -ENODEV;
5369         }
5370
5371         return 0;
5372 }
5373
5374 /* tp->lock is held. */
5375 static int tg3_abort_hw(struct tg3 *tp, int silent)
5376 {
5377         int i, err;
5378
5379         tg3_disable_ints(tp);
5380
5381         tp->rx_mode &= ~RX_MODE_ENABLE;
5382         tw32_f(MAC_RX_MODE, tp->rx_mode);
5383         udelay(10);
5384
5385         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5386         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5387         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5388         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5389         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5390         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5391
5392         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5393         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5394         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5395         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5396         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5397         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5398         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5399
5400         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5401         tw32_f(MAC_MODE, tp->mac_mode);
5402         udelay(40);
5403
5404         tp->tx_mode &= ~TX_MODE_ENABLE;
5405         tw32_f(MAC_TX_MODE, tp->tx_mode);
5406
5407         for (i = 0; i < MAX_WAIT_CNT; i++) {
5408                 udelay(100);
5409                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5410                         break;
5411         }
5412         if (i >= MAX_WAIT_CNT) {
5413                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5414                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5415                        tp->dev->name, tr32(MAC_TX_MODE));
5416                 err |= -ENODEV;
5417         }
5418
5419         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5420         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5421         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5422
5423         tw32(FTQ_RESET, 0xffffffff);
5424         tw32(FTQ_RESET, 0x00000000);
5425
5426         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5427         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5428
5429         if (tp->hw_status)
5430                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5431         if (tp->hw_stats)
5432                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5433
5434         return err;
5435 }
5436
5437 /* tp->lock is held. */
5438 static int tg3_nvram_lock(struct tg3 *tp)
5439 {
5440         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5441                 int i;
5442
5443                 if (tp->nvram_lock_cnt == 0) {
5444                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5445                         for (i = 0; i < 8000; i++) {
5446                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5447                                         break;
5448                                 udelay(20);
5449                         }
5450                         if (i == 8000) {
5451                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5452                                 return -ENODEV;
5453                         }
5454                 }
5455                 tp->nvram_lock_cnt++;
5456         }
5457         return 0;
5458 }
5459
5460 /* tp->lock is held. */
5461 static void tg3_nvram_unlock(struct tg3 *tp)
5462 {
5463         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5464                 if (tp->nvram_lock_cnt > 0)
5465                         tp->nvram_lock_cnt--;
5466                 if (tp->nvram_lock_cnt == 0)
5467                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5468         }
5469 }
5470
5471 /* tp->lock is held. */
5472 static void tg3_enable_nvram_access(struct tg3 *tp)
5473 {
5474         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5475             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5476                 u32 nvaccess = tr32(NVRAM_ACCESS);
5477
5478                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5479         }
5480 }
5481
5482 /* tp->lock is held. */
5483 static void tg3_disable_nvram_access(struct tg3 *tp)
5484 {
5485         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5486             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5487                 u32 nvaccess = tr32(NVRAM_ACCESS);
5488
5489                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5490         }
5491 }
5492
5493 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5494 {
5495         int i;
5496         u32 apedata;
5497
5498         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5499         if (apedata != APE_SEG_SIG_MAGIC)
5500                 return;
5501
5502         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5503         if (apedata != APE_FW_STATUS_READY)
5504                 return;
5505
5506         /* Wait for up to 1 millisecond for APE to service previous event. */
5507         for (i = 0; i < 10; i++) {
5508                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5509                         return;
5510
5511                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5512
5513                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5514                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5515                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5516
5517                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5518
5519                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5520                         break;
5521
5522                 udelay(100);
5523         }
5524
5525         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5526                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5527 }
5528
5529 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5530 {
5531         u32 event;
5532         u32 apedata;
5533
5534         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5535                 return;
5536
5537         switch (kind) {
5538                 case RESET_KIND_INIT:
5539                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5540                                         APE_HOST_SEG_SIG_MAGIC);
5541                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5542                                         APE_HOST_SEG_LEN_MAGIC);
5543                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5544                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5545                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5546                                         APE_HOST_DRIVER_ID_MAGIC);
5547                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5548                                         APE_HOST_BEHAV_NO_PHYLOCK);
5549
5550                         event = APE_EVENT_STATUS_STATE_START;
5551                         break;
5552                 case RESET_KIND_SHUTDOWN:
5553                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5554                         break;
5555                 case RESET_KIND_SUSPEND:
5556                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5557                         break;
5558                 default:
5559                         return;
5560         }
5561
5562         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5563
5564         tg3_ape_send_event(tp, event);
5565 }
5566
5567 /* tp->lock is held. */
5568 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5569 {
5570         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5571                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5572
5573         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5574                 switch (kind) {
5575                 case RESET_KIND_INIT:
5576                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5577                                       DRV_STATE_START);
5578                         break;
5579
5580                 case RESET_KIND_SHUTDOWN:
5581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5582                                       DRV_STATE_UNLOAD);
5583                         break;
5584
5585                 case RESET_KIND_SUSPEND:
5586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5587                                       DRV_STATE_SUSPEND);
5588                         break;
5589
5590                 default:
5591                         break;
5592                 }
5593         }
5594
5595         if (kind == RESET_KIND_INIT ||
5596             kind == RESET_KIND_SUSPEND)
5597                 tg3_ape_driver_state_change(tp, kind);
5598 }
5599
5600 /* tp->lock is held. */
5601 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5602 {
5603         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5604                 switch (kind) {
5605                 case RESET_KIND_INIT:
5606                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5607                                       DRV_STATE_START_DONE);
5608                         break;
5609
5610                 case RESET_KIND_SHUTDOWN:
5611                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5612                                       DRV_STATE_UNLOAD_DONE);
5613                         break;
5614
5615                 default:
5616                         break;
5617                 }
5618         }
5619
5620         if (kind == RESET_KIND_SHUTDOWN)
5621                 tg3_ape_driver_state_change(tp, kind);
5622 }
5623
5624 /* tp->lock is held. */
5625 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5626 {
5627         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5628                 switch (kind) {
5629                 case RESET_KIND_INIT:
5630                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5631                                       DRV_STATE_START);
5632                         break;
5633
5634                 case RESET_KIND_SHUTDOWN:
5635                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5636                                       DRV_STATE_UNLOAD);
5637                         break;
5638
5639                 case RESET_KIND_SUSPEND:
5640                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5641                                       DRV_STATE_SUSPEND);
5642                         break;
5643
5644                 default:
5645                         break;
5646                 }
5647         }
5648 }
5649
5650 static int tg3_poll_fw(struct tg3 *tp)
5651 {
5652         int i;
5653         u32 val;
5654
5655         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5656                 /* Wait up to 20ms for init done. */
5657                 for (i = 0; i < 200; i++) {
5658                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5659                                 return 0;
5660                         udelay(100);
5661                 }
5662                 return -ENODEV;
5663         }
5664
5665         /* Wait for firmware initialization to complete. */
5666         for (i = 0; i < 100000; i++) {
5667                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5668                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5669                         break;
5670                 udelay(10);
5671         }
5672
5673         /* Chip might not be fitted with firmware.  Some Sun onboard
5674          * parts are configured like that.  So don't signal the timeout
5675          * of the above loop as an error, but do report the lack of
5676          * running firmware once.
5677          */
5678         if (i >= 100000 &&
5679             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5680                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5681
5682                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5683                        tp->dev->name);
5684         }
5685
5686         return 0;
5687 }
5688
5689 /* Save PCI command register before chip reset */
5690 static void tg3_save_pci_state(struct tg3 *tp)
5691 {
5692         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5693 }
5694
5695 /* Restore PCI state after chip reset */
5696 static void tg3_restore_pci_state(struct tg3 *tp)
5697 {
5698         u32 val;
5699
5700         /* Re-enable indirect register accesses. */
5701         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5702                                tp->misc_host_ctrl);
5703
5704         /* Set MAX PCI retry to zero. */
5705         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5706         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5707             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5708                 val |= PCISTATE_RETRY_SAME_DMA;
5709         /* Allow reads and writes to the APE register and memory space. */
5710         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5711                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5712                        PCISTATE_ALLOW_APE_SHMEM_WR;
5713         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5714
5715         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5716
5717         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5718                 pcie_set_readrq(tp->pdev, 4096);
5719         else {
5720                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5721                                       tp->pci_cacheline_sz);
5722                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5723                                       tp->pci_lat_timer);
5724         }
5725
5726         /* Make sure PCI-X relaxed ordering bit is clear. */
5727         if (tp->pcix_cap) {
5728                 u16 pcix_cmd;
5729
5730                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5731                                      &pcix_cmd);
5732                 pcix_cmd &= ~PCI_X_CMD_ERO;
5733                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5734                                       pcix_cmd);
5735         }
5736
5737         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5738
5739                 /* Chip reset on 5780 will reset MSI enable bit,
5740                  * so need to restore it.
5741                  */
5742                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5743                         u16 ctrl;
5744
5745                         pci_read_config_word(tp->pdev,
5746                                              tp->msi_cap + PCI_MSI_FLAGS,
5747                                              &ctrl);
5748                         pci_write_config_word(tp->pdev,
5749                                               tp->msi_cap + PCI_MSI_FLAGS,
5750                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5751                         val = tr32(MSGINT_MODE);
5752                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5753                 }
5754         }
5755 }
5756
5757 static void tg3_stop_fw(struct tg3 *);
5758
5759 /* tp->lock is held. */
5760 static int tg3_chip_reset(struct tg3 *tp)
5761 {
5762         u32 val;
5763         void (*write_op)(struct tg3 *, u32, u32);
5764         int err;
5765
5766         tg3_nvram_lock(tp);
5767
5768         tg3_mdio_stop(tp);
5769
5770         /* No matching tg3_nvram_unlock() after this because
5771          * chip reset below will undo the nvram lock.
5772          */
5773         tp->nvram_lock_cnt = 0;
5774
5775         /* GRC_MISC_CFG core clock reset will clear the memory
5776          * enable bit in PCI register 4 and the MSI enable bit
5777          * on some chips, so we save relevant registers here.
5778          */
5779         tg3_save_pci_state(tp);
5780
5781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5782             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5783             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5784             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5785             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5786             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5787                 tw32(GRC_FASTBOOT_PC, 0);
5788
5789         /*
5790          * We must avoid the readl() that normally takes place.
5791          * It locks machines, causes machine checks, and other
5792          * fun things.  So, temporarily disable the 5701
5793          * hardware workaround, while we do the reset.
5794          */
5795         write_op = tp->write32;
5796         if (write_op == tg3_write_flush_reg32)
5797                 tp->write32 = tg3_write32;
5798
5799         /* Prevent the irq handler from reading or writing PCI registers
5800          * during chip reset when the memory enable bit in the PCI command
5801          * register may be cleared.  The chip does not generate interrupt
5802          * at this time, but the irq handler may still be called due to irq
5803          * sharing or irqpoll.
5804          */
5805         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5806         if (tp->hw_status) {
5807                 tp->hw_status->status = 0;
5808                 tp->hw_status->status_tag = 0;
5809         }
5810         tp->last_tag = 0;
5811         smp_mb();
5812         synchronize_irq(tp->pdev->irq);
5813
5814         /* do the reset */
5815         val = GRC_MISC_CFG_CORECLK_RESET;
5816
5817         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5818                 if (tr32(0x7e2c) == 0x60) {
5819                         tw32(0x7e2c, 0x20);
5820                 }
5821                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5822                         tw32(GRC_MISC_CFG, (1 << 29));
5823                         val |= (1 << 29);
5824                 }
5825         }
5826
5827         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5828                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5829                 tw32(GRC_VCPU_EXT_CTRL,
5830                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5831         }
5832
5833         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5834                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5835         tw32(GRC_MISC_CFG, val);
5836
5837         /* restore 5701 hardware bug workaround write method */
5838         tp->write32 = write_op;
5839
5840         /* Unfortunately, we have to delay before the PCI read back.
5841          * Some 575X chips even will not respond to a PCI cfg access
5842          * when the reset command is given to the chip.
5843          *
5844          * How do these hardware designers expect things to work
5845          * properly if the PCI write is posted for a long period
5846          * of time?  It is always necessary to have some method by
5847          * which a register read back can occur to push the write
5848          * out which does the reset.
5849          *
5850          * For most tg3 variants the trick below was working.
5851          * Ho hum...
5852          */
5853         udelay(120);
5854
5855         /* Flush PCI posted writes.  The normal MMIO registers
5856          * are inaccessible at this time so this is the only
5857          * way to make this reliably (actually, this is no longer
5858          * the case, see above).  I tried to use indirect
5859          * register read/write but this upset some 5701 variants.
5860          */
5861         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5862
5863         udelay(120);
5864
5865         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5866                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5867                         int i;
5868                         u32 cfg_val;
5869
5870                         /* Wait for link training to complete.  */
5871                         for (i = 0; i < 5000; i++)
5872                                 udelay(100);
5873
5874                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5875                         pci_write_config_dword(tp->pdev, 0xc4,
5876                                                cfg_val | (1 << 15));
5877                 }
5878                 /* Set PCIE max payload size and clear error status.  */
5879                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5880         }
5881
5882         tg3_restore_pci_state(tp);
5883
5884         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5885
5886         val = 0;
5887         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5888                 val = tr32(MEMARB_MODE);
5889         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5890
5891         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5892                 tg3_stop_fw(tp);
5893                 tw32(0x5000, 0x400);
5894         }
5895
5896         tw32(GRC_MODE, tp->grc_mode);
5897
5898         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5899                 val = tr32(0xc4);
5900
5901                 tw32(0xc4, val | (1 << 15));
5902         }
5903
5904         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5905             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5906                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5907                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5908                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5909                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5910         }
5911
5912         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5913                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5914                 tw32_f(MAC_MODE, tp->mac_mode);
5915         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5916                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5917                 tw32_f(MAC_MODE, tp->mac_mode);
5918         } else
5919                 tw32_f(MAC_MODE, 0);
5920         udelay(40);
5921
5922         tg3_mdio_start(tp);
5923
5924         err = tg3_poll_fw(tp);
5925         if (err)
5926                 return err;
5927
5928         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5929             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5930                 val = tr32(0x7c00);
5931
5932                 tw32(0x7c00, val | (1 << 25));
5933         }
5934
5935         /* Reprobe ASF enable state.  */
5936         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5937         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5938         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5939         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5940                 u32 nic_cfg;
5941
5942                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5943                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5944                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5945                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5946                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5947                 }
5948         }
5949
5950         return 0;
5951 }
5952
5953 /* tp->lock is held. */
5954 static void tg3_stop_fw(struct tg3 *tp)
5955 {
5956         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5957            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5958                 u32 val;
5959
5960                 /* Wait for RX cpu to ACK the previous event. */
5961                 tg3_wait_for_event_ack(tp);
5962
5963                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5964                 val = tr32(GRC_RX_CPU_EVENT);
5965                 val |= GRC_RX_CPU_DRIVER_EVENT;
5966                 tw32(GRC_RX_CPU_EVENT, val);
5967
5968                 /* Wait for RX cpu to ACK this event. */
5969                 tg3_wait_for_event_ack(tp);
5970         }
5971 }
5972
5973 /* tp->lock is held. */
5974 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5975 {
5976         int err;
5977
5978         tg3_stop_fw(tp);
5979
5980         tg3_write_sig_pre_reset(tp, kind);
5981
5982         tg3_abort_hw(tp, silent);
5983         err = tg3_chip_reset(tp);
5984
5985         tg3_write_sig_legacy(tp, kind);
5986         tg3_write_sig_post_reset(tp, kind);
5987
5988         if (err)
5989                 return err;
5990
5991         return 0;
5992 }
5993
5994 #define TG3_FW_RELEASE_MAJOR    0x0
5995 #define TG3_FW_RELASE_MINOR     0x0
5996 #define TG3_FW_RELEASE_FIX      0x0
5997 #define TG3_FW_START_ADDR       0x08000000
5998 #define TG3_FW_TEXT_ADDR        0x08000000
5999 #define TG3_FW_TEXT_LEN         0x9c0
6000 #define TG3_FW_RODATA_ADDR      0x080009c0
6001 #define TG3_FW_RODATA_LEN       0x60
6002 #define TG3_FW_DATA_ADDR        0x08000a40
6003 #define TG3_FW_DATA_LEN         0x20
6004 #define TG3_FW_SBSS_ADDR        0x08000a60
6005 #define TG3_FW_SBSS_LEN         0xc
6006 #define TG3_FW_BSS_ADDR         0x08000a70
6007 #define TG3_FW_BSS_LEN          0x10
6008
6009 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6010         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6011         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6012         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6013         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6014         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6015         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6016         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6017         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6018         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6019         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6020         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6021         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6022         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6023         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6024         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6025         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6026         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6027         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6028         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6029         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6030         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6031         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6032         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6033         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6034         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6035         0, 0, 0, 0, 0, 0,
6036         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6037         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6038         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6039         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6040         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6041         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6042         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6043         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6044         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6045         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6046         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6047         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6048         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6049         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6050         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6051         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6052         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6053         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6054         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6055         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6056         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6057         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6058         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6059         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6060         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6061         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6062         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6063         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6064         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6065         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6066         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6067         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6068         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6069         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6070         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6071         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6072         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6073         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6074         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6075         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6076         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6077         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6078         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6079         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6080         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6081         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6082         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6083         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6084         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6085         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6086         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6087         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6088         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6089         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6090         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6091         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6092         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6093         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6094         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6095         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6096         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6097         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6098         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6099         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6100         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6101 };
6102
6103 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6104         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6105         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6106         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6107         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6108         0x00000000
6109 };
6110
6111 #if 0 /* All zeros, don't eat up space with it. */
6112 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6113         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6114         0x00000000, 0x00000000, 0x00000000, 0x00000000
6115 };
6116 #endif
6117
6118 #define RX_CPU_SCRATCH_BASE     0x30000
6119 #define RX_CPU_SCRATCH_SIZE     0x04000
6120 #define TX_CPU_SCRATCH_BASE     0x34000
6121 #define TX_CPU_SCRATCH_SIZE     0x04000
6122
6123 /* tp->lock is held. */
6124 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6125 {
6126         int i;
6127
6128         BUG_ON(offset == TX_CPU_BASE &&
6129             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6130
6131         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6132                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6133
6134                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6135                 return 0;
6136         }
6137         if (offset == RX_CPU_BASE) {
6138                 for (i = 0; i < 10000; i++) {
6139                         tw32(offset + CPU_STATE, 0xffffffff);
6140                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6141                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6142                                 break;
6143                 }
6144
6145                 tw32(offset + CPU_STATE, 0xffffffff);
6146                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6147                 udelay(10);
6148         } else {
6149                 for (i = 0; i < 10000; i++) {
6150                         tw32(offset + CPU_STATE, 0xffffffff);
6151                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6152                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6153                                 break;
6154                 }
6155         }
6156
6157         if (i >= 10000) {
6158                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6159                        "and %s CPU\n",
6160                        tp->dev->name,
6161                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6162                 return -ENODEV;
6163         }
6164
6165         /* Clear firmware's nvram arbitration. */
6166         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6167                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6168         return 0;
6169 }
6170
6171 struct fw_info {
6172         unsigned int text_base;
6173         unsigned int text_len;
6174         const u32 *text_data;
6175         unsigned int rodata_base;
6176         unsigned int rodata_len;
6177         const u32 *rodata_data;
6178         unsigned int data_base;
6179         unsigned int data_len;
6180         const u32 *data_data;
6181 };
6182
6183 /* tp->lock is held. */
6184 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6185                                  int cpu_scratch_size, struct fw_info *info)
6186 {
6187         int err, lock_err, i;
6188         void (*write_op)(struct tg3 *, u32, u32);
6189
6190         if (cpu_base == TX_CPU_BASE &&
6191             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6192                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6193                        "TX cpu firmware on %s which is 5705.\n",
6194                        tp->dev->name);
6195                 return -EINVAL;
6196         }
6197
6198         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6199                 write_op = tg3_write_mem;
6200         else
6201                 write_op = tg3_write_indirect_reg32;
6202
6203         /* It is possible that bootcode is still loading at this point.
6204          * Get the nvram lock first before halting the cpu.
6205          */
6206         lock_err = tg3_nvram_lock(tp);
6207         err = tg3_halt_cpu(tp, cpu_base);
6208         if (!lock_err)
6209                 tg3_nvram_unlock(tp);
6210         if (err)
6211                 goto out;
6212
6213         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6214                 write_op(tp, cpu_scratch_base + i, 0);
6215         tw32(cpu_base + CPU_STATE, 0xffffffff);
6216         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6217         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6218                 write_op(tp, (cpu_scratch_base +
6219                               (info->text_base & 0xffff) +
6220                               (i * sizeof(u32))),
6221                          (info->text_data ?
6222                           info->text_data[i] : 0));
6223         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6224                 write_op(tp, (cpu_scratch_base +
6225                               (info->rodata_base & 0xffff) +
6226                               (i * sizeof(u32))),
6227                          (info->rodata_data ?
6228                           info->rodata_data[i] : 0));
6229         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6230                 write_op(tp, (cpu_scratch_base +
6231                               (info->data_base & 0xffff) +
6232                               (i * sizeof(u32))),
6233                          (info->data_data ?
6234                           info->data_data[i] : 0));
6235
6236         err = 0;
6237
6238 out:
6239         return err;
6240 }
6241
6242 /* tp->lock is held. */
6243 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6244 {
6245         struct fw_info info;
6246         int err, i;
6247
6248         info.text_base = TG3_FW_TEXT_ADDR;
6249         info.text_len = TG3_FW_TEXT_LEN;
6250         info.text_data = &tg3FwText[0];
6251         info.rodata_base = TG3_FW_RODATA_ADDR;
6252         info.rodata_len = TG3_FW_RODATA_LEN;
6253         info.rodata_data = &tg3FwRodata[0];
6254         info.data_base = TG3_FW_DATA_ADDR;
6255         info.data_len = TG3_FW_DATA_LEN;
6256         info.data_data = NULL;
6257
6258         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6259                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6260                                     &info);
6261         if (err)
6262                 return err;
6263
6264         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6265                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6266                                     &info);
6267         if (err)
6268                 return err;
6269
6270         /* Now startup only the RX cpu. */
6271         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6272         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6273
6274         for (i = 0; i < 5; i++) {
6275                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6276                         break;
6277                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6278                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6279                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6280                 udelay(1000);
6281         }
6282         if (i >= 5) {
6283                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6284                        "to set RX CPU PC, is %08x should be %08x\n",
6285                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6286                        TG3_FW_TEXT_ADDR);
6287                 return -ENODEV;
6288         }
6289         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6290         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6291
6292         return 0;
6293 }
6294
6295
6296 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6297 #define TG3_TSO_FW_RELASE_MINOR         0x6
6298 #define TG3_TSO_FW_RELEASE_FIX          0x0
6299 #define TG3_TSO_FW_START_ADDR           0x08000000
6300 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6301 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6302 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6303 #define TG3_TSO_FW_RODATA_LEN           0x60
6304 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6305 #define TG3_TSO_FW_DATA_LEN             0x30
6306 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6307 #define TG3_TSO_FW_SBSS_LEN             0x2c
6308 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6309 #define TG3_TSO_FW_BSS_LEN              0x894
6310
6311 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6312         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6313         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6314         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6315         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6316         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6317         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6318         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6319         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6320         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6321         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6322         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6323         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6324         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6325         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6326         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6327         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6328         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6329         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6330         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6331         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6332         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6333         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6334         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6335         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6336         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6337         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6338         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6339         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6340         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6341         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6342         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6343         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6344         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6345         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6346         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6347         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6348         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6349         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6350         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6351         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6352         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6353         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6354         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6355         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6356         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6357         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6358         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6359         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6360         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6361         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6362         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6363         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6364         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6365         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6366         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6367         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6368         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6369         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6370         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6371         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6372         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6373         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6374         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6375         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6376         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6377         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6378         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6379         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6380         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6381         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6382         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6383         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6384         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6385         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6386         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6387         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6388         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6389         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6390         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6391         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6392         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6393         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6394         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6395         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6396         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6397         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6398         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6399         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6400         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6401         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6402         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6403         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6404         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6405         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6406         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6407         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6408         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6409         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6410         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6411         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6412         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6413         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6414         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6415         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6416         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6417         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6418         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6419         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6420         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6421         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6422         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6423         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6424         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6425         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6426         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6427         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6428         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6429         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6430         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6431         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6432         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6433         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6434         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6435         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6436         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6437         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6438         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6439         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6440         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6441         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6442         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6443         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6444         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6445         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6446         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6447         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6448         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6449         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6450         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6451         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6452         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6453         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6454         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6455         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6456         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6457         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6458         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6459         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6460         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6461         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6462         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6463         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6464         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6465         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6466         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6467         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6468         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6469         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6470         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6471         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6472         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6473         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6474         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6475         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6476         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6477         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6478         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6479         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6480         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6481         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6482         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6483         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6484         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6485         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6486         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6487         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6488         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6489         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6490         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6491         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6492         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6493         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6494         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6495         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6496         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6497         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6498         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6499         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6500         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6501         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6502         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6503         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6504         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6505         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6506         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6507         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6508         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6509         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6510         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6511         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6512         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6513         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6514         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6515         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6516         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6517         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6518         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6519         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6520         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6521         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6522         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6523         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6524         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6525         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6526         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6527         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6528         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6529         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6530         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6531         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6532         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6533         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6534         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6535         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6536         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6537         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6538         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6539         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6540         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6541         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6542         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6543         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6544         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6545         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6546         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6547         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6548         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6549         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6550         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6551         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6552         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6553         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6554         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6555         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6556         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6557         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6558         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6559         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6560         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6561         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6562         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6563         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6564         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6565         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6566         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6567         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6568         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6569         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6570         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6571         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6572         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6573         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6574         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6575         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6576         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6577         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6578         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6579         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6580         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6581         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6582         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6583         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6584         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6585         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6586         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6587         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6588         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6589         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6590         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6591         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6592         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6593         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6594         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6595         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6596 };
6597
6598 static const u32 tg3TsoFwRodata[] = {
6599         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6600         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6601         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6602         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6603         0x00000000,
6604 };
6605
6606 static const u32 tg3TsoFwData[] = {
6607         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6608         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6609         0x00000000,
6610 };
6611
6612 /* 5705 needs a special version of the TSO firmware.  */
6613 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
6614 #define TG3_TSO5_FW_RELASE_MINOR        0x2
6615 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6616 #define TG3_TSO5_FW_START_ADDR          0x00010000
6617 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6618 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6619 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6620 #define TG3_TSO5_FW_RODATA_LEN          0x50
6621 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6622 #define TG3_TSO5_FW_DATA_LEN            0x20
6623 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6624 #define TG3_TSO5_FW_SBSS_LEN            0x28
6625 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6626 #define TG3_TSO5_FW_BSS_LEN             0x88
6627
6628 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6629         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6630         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6631         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6632         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6633         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6634         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6635         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6636         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6637         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6638         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6639         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6640         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6641         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6642         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6643         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6644         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6645         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6646         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6647         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6648         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6649         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6650         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6651         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6652         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6653         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6654         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6655         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6656         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6657         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6658         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6659         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6660         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6661         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6662         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6663         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6664         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6665         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6666         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6667         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6668         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6669         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6670         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6671         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6672         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6673         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6674         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6675         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6676         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6677         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6678         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6679         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6680         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6681         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6682         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6683         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6684         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6685         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6686         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6687         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6688         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6689         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6690         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6691         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6692         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6693         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6694         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6695         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6696         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6697         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6698         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6699         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6700         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6701         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6702         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6703         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6704         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6705         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6706         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6707         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6708         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6709         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6710         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6711         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6712         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6713         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6714         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6715         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6716         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6717         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6718         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6719         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6720         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6721         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6722         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6723         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6724         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6725         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6726         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6727         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6728         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6729         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6730         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6731         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6732         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6733         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6734         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6735         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6736         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6737         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6738         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6739         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6740         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6741         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6742         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6743         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6744         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6745         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6746         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6747         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6748         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6749         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6750         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6751         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6752         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6753         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6754         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6755         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6756         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6757         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6758         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6759         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6760         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6761         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6762         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6763         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6764         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6765         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6766         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6767         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6768         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6769         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6770         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6771         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6772         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6773         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6774         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6775         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6776         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6777         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6778         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6779         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6780         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6781         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6782         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6783         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6784         0x00000000, 0x00000000, 0x00000000,
6785 };
6786
6787 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6788         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6789         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6790         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6791         0x00000000, 0x00000000, 0x00000000,
6792 };
6793
6794 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6795         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6796         0x00000000, 0x00000000, 0x00000000,
6797 };
6798
6799 /* tp->lock is held. */
6800 static int tg3_load_tso_firmware(struct tg3 *tp)
6801 {
6802         struct fw_info info;
6803         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6804         int err, i;
6805
6806         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6807                 return 0;
6808
6809         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6810                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6811                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6812                 info.text_data = &tg3Tso5FwText[0];
6813                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6814                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6815                 info.rodata_data = &tg3Tso5FwRodata[0];
6816                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6817                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6818                 info.data_data = &tg3Tso5FwData[0];
6819                 cpu_base = RX_CPU_BASE;
6820                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6821                 cpu_scratch_size = (info.text_len +
6822                                     info.rodata_len +
6823                                     info.data_len +
6824                                     TG3_TSO5_FW_SBSS_LEN +
6825                                     TG3_TSO5_FW_BSS_LEN);
6826         } else {
6827                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6828                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6829                 info.text_data = &tg3TsoFwText[0];
6830                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6831                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6832                 info.rodata_data = &tg3TsoFwRodata[0];
6833                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6834                 info.data_len = TG3_TSO_FW_DATA_LEN;
6835                 info.data_data = &tg3TsoFwData[0];
6836                 cpu_base = TX_CPU_BASE;
6837                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6838                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6839         }
6840
6841         err = tg3_load_firmware_cpu(tp, cpu_base,
6842                                     cpu_scratch_base, cpu_scratch_size,
6843                                     &info);
6844         if (err)
6845                 return err;
6846
6847         /* Now startup the cpu. */
6848         tw32(cpu_base + CPU_STATE, 0xffffffff);
6849         tw32_f(cpu_base + CPU_PC,    info.text_base);
6850
6851         for (i = 0; i < 5; i++) {
6852                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6853                         break;
6854                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6855                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6856                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6857                 udelay(1000);
6858         }
6859         if (i >= 5) {
6860                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6861                        "to set CPU PC, is %08x should be %08x\n",
6862                        tp->dev->name, tr32(cpu_base + CPU_PC),
6863                        info.text_base);
6864                 return -ENODEV;
6865         }
6866         tw32(cpu_base + CPU_STATE, 0xffffffff);
6867         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6868         return 0;
6869 }
6870
6871
6872 /* tp->lock is held. */
6873 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6874 {
6875         u32 addr_high, addr_low;
6876         int i;
6877
6878         addr_high = ((tp->dev->dev_addr[0] << 8) |
6879                      tp->dev->dev_addr[1]);
6880         addr_low = ((tp->dev->dev_addr[2] << 24) |
6881                     (tp->dev->dev_addr[3] << 16) |
6882                     (tp->dev->dev_addr[4] <<  8) |
6883                     (tp->dev->dev_addr[5] <<  0));
6884         for (i = 0; i < 4; i++) {
6885                 if (i == 1 && skip_mac_1)
6886                         continue;
6887                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6888                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6889         }
6890
6891         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6892             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6893                 for (i = 0; i < 12; i++) {
6894                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6895                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6896                 }
6897         }
6898
6899         addr_high = (tp->dev->dev_addr[0] +
6900                      tp->dev->dev_addr[1] +
6901                      tp->dev->dev_addr[2] +
6902                      tp->dev->dev_addr[3] +
6903                      tp->dev->dev_addr[4] +
6904                      tp->dev->dev_addr[5]) &
6905                 TX_BACKOFF_SEED_MASK;
6906         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6907 }
6908
6909 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6910 {
6911         struct tg3 *tp = netdev_priv(dev);
6912         struct sockaddr *addr = p;
6913         int err = 0, skip_mac_1 = 0;
6914
6915         if (!is_valid_ether_addr(addr->sa_data))
6916                 return -EINVAL;
6917
6918         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6919
6920         if (!netif_running(dev))
6921                 return 0;
6922
6923         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6924                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6925
6926                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6927                 addr0_low = tr32(MAC_ADDR_0_LOW);
6928                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6929                 addr1_low = tr32(MAC_ADDR_1_LOW);
6930
6931                 /* Skip MAC addr 1 if ASF is using it. */
6932                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6933                     !(addr1_high == 0 && addr1_low == 0))
6934                         skip_mac_1 = 1;
6935         }
6936         spin_lock_bh(&tp->lock);
6937         __tg3_set_mac_addr(tp, skip_mac_1);
6938         spin_unlock_bh(&tp->lock);
6939
6940         return err;
6941 }
6942
6943 /* tp->lock is held. */
6944 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6945                            dma_addr_t mapping, u32 maxlen_flags,
6946                            u32 nic_addr)
6947 {
6948         tg3_write_mem(tp,
6949                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6950                       ((u64) mapping >> 32));
6951         tg3_write_mem(tp,
6952                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6953                       ((u64) mapping & 0xffffffff));
6954         tg3_write_mem(tp,
6955                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6956                        maxlen_flags);
6957
6958         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6959                 tg3_write_mem(tp,
6960                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6961                               nic_addr);
6962 }
6963
6964 static void __tg3_set_rx_mode(struct net_device *);
6965 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6966 {
6967         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6968         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6969         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6970         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6971         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6972                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6973                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6974         }
6975         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6976         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6977         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6978                 u32 val = ec->stats_block_coalesce_usecs;
6979
6980                 if (!netif_carrier_ok(tp->dev))
6981                         val = 0;
6982
6983                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6984         }
6985 }
6986
6987 /* tp->lock is held. */
6988 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6989 {
6990         u32 val, rdmac_mode;
6991         int i, err, limit;
6992
6993         tg3_disable_ints(tp);
6994
6995         tg3_stop_fw(tp);
6996
6997         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6998
6999         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7000                 tg3_abort_hw(tp, 1);
7001         }
7002
7003         if (reset_phy &&
7004             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7005                 tg3_phy_reset(tp);
7006
7007         err = tg3_chip_reset(tp);
7008         if (err)
7009                 return err;
7010
7011         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7012
7013         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7014             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7015                 val = tr32(TG3_CPMU_CTRL);
7016                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7017                 tw32(TG3_CPMU_CTRL, val);
7018
7019                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7020                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7021                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7022                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7023
7024                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7025                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7026                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7027                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7028
7029                 val = tr32(TG3_CPMU_HST_ACC);
7030                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7031                 val |= CPMU_HST_ACC_MACCLK_6_25;
7032                 tw32(TG3_CPMU_HST_ACC, val);
7033         }
7034
7035         /* This works around an issue with Athlon chipsets on
7036          * B3 tigon3 silicon.  This bit has no effect on any
7037          * other revision.  But do not set this on PCI Express
7038          * chips and don't even touch the clocks if the CPMU is present.
7039          */
7040         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7041                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7042                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7043                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7044         }
7045
7046         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7047             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7048                 val = tr32(TG3PCI_PCISTATE);
7049                 val |= PCISTATE_RETRY_SAME_DMA;
7050                 tw32(TG3PCI_PCISTATE, val);
7051         }
7052
7053         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7054                 /* Allow reads and writes to the
7055                  * APE register and memory space.
7056                  */
7057                 val = tr32(TG3PCI_PCISTATE);
7058                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7059                        PCISTATE_ALLOW_APE_SHMEM_WR;
7060                 tw32(TG3PCI_PCISTATE, val);
7061         }
7062
7063         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7064                 /* Enable some hw fixes.  */
7065                 val = tr32(TG3PCI_MSI_DATA);
7066                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7067                 tw32(TG3PCI_MSI_DATA, val);
7068         }
7069
7070         /* Descriptor ring init may make accesses to the
7071          * NIC SRAM area to setup the TX descriptors, so we
7072          * can only do this after the hardware has been
7073          * successfully reset.
7074          */
7075         err = tg3_init_rings(tp);
7076         if (err)
7077                 return err;
7078
7079         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7080             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7081             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7082                 /* This value is determined during the probe time DMA
7083                  * engine test, tg3_test_dma.
7084                  */
7085                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7086         }
7087
7088         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7089                           GRC_MODE_4X_NIC_SEND_RINGS |
7090                           GRC_MODE_NO_TX_PHDR_CSUM |
7091                           GRC_MODE_NO_RX_PHDR_CSUM);
7092         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7093
7094         /* Pseudo-header checksum is done by hardware logic and not
7095          * the offload processers, so make the chip do the pseudo-
7096          * header checksums on receive.  For transmit it is more
7097          * convenient to do the pseudo-header checksum in software
7098          * as Linux does that on transmit for us in all cases.
7099          */
7100         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7101
7102         tw32(GRC_MODE,
7103              tp->grc_mode |
7104              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7105
7106         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7107         val = tr32(GRC_MISC_CFG);
7108         val &= ~0xff;
7109         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7110         tw32(GRC_MISC_CFG, val);
7111
7112         /* Initialize MBUF/DESC pool. */
7113         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7114                 /* Do nothing.  */
7115         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7116                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7117                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7118                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7119                 else
7120                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7121                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7122                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7123         }
7124         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7125                 int fw_len;
7126
7127                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7128                           TG3_TSO5_FW_RODATA_LEN +
7129                           TG3_TSO5_FW_DATA_LEN +
7130                           TG3_TSO5_FW_SBSS_LEN +
7131                           TG3_TSO5_FW_BSS_LEN);
7132                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7133                 tw32(BUFMGR_MB_POOL_ADDR,
7134                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7135                 tw32(BUFMGR_MB_POOL_SIZE,
7136                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7137         }
7138
7139         if (tp->dev->mtu <= ETH_DATA_LEN) {
7140                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7141                      tp->bufmgr_config.mbuf_read_dma_low_water);
7142                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7143                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7144                 tw32(BUFMGR_MB_HIGH_WATER,
7145                      tp->bufmgr_config.mbuf_high_water);
7146         } else {
7147                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7148                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7149                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7150                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7151                 tw32(BUFMGR_MB_HIGH_WATER,
7152                      tp->bufmgr_config.mbuf_high_water_jumbo);
7153         }
7154         tw32(BUFMGR_DMA_LOW_WATER,
7155              tp->bufmgr_config.dma_low_water);
7156         tw32(BUFMGR_DMA_HIGH_WATER,
7157              tp->bufmgr_config.dma_high_water);
7158
7159         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7160         for (i = 0; i < 2000; i++) {
7161                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7162                         break;
7163                 udelay(10);
7164         }
7165         if (i >= 2000) {
7166                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7167                        tp->dev->name);
7168                 return -ENODEV;
7169         }
7170
7171         /* Setup replenish threshold. */
7172         val = tp->rx_pending / 8;
7173         if (val == 0)
7174                 val = 1;
7175         else if (val > tp->rx_std_max_post)
7176                 val = tp->rx_std_max_post;
7177         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7178                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7179                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7180
7181                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7182                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7183         }
7184
7185         tw32(RCVBDI_STD_THRESH, val);
7186
7187         /* Initialize TG3_BDINFO's at:
7188          *  RCVDBDI_STD_BD:     standard eth size rx ring
7189          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7190          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7191          *
7192          * like so:
7193          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7194          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7195          *                              ring attribute flags
7196          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7197          *
7198          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7199          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7200          *
7201          * The size of each ring is fixed in the firmware, but the location is
7202          * configurable.
7203          */
7204         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7205              ((u64) tp->rx_std_mapping >> 32));
7206         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7207              ((u64) tp->rx_std_mapping & 0xffffffff));
7208         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7209              NIC_SRAM_RX_BUFFER_DESC);
7210
7211         /* Don't even try to program the JUMBO/MINI buffer descriptor
7212          * configs on 5705.
7213          */
7214         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7215                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7216                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7217         } else {
7218                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7219                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7220
7221                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7222                      BDINFO_FLAGS_DISABLED);
7223
7224                 /* Setup replenish threshold. */
7225                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7226
7227                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7228                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7229                              ((u64) tp->rx_jumbo_mapping >> 32));
7230                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7231                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7232                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7233                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7234                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7235                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7236                 } else {
7237                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7238                              BDINFO_FLAGS_DISABLED);
7239                 }
7240
7241         }
7242
7243         /* There is only one send ring on 5705/5750, no need to explicitly
7244          * disable the others.
7245          */
7246         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7247                 /* Clear out send RCB ring in SRAM. */
7248                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7249                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7250                                       BDINFO_FLAGS_DISABLED);
7251         }
7252
7253         tp->tx_prod = 0;
7254         tp->tx_cons = 0;
7255         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7256         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7257
7258         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7259                        tp->tx_desc_mapping,
7260                        (TG3_TX_RING_SIZE <<
7261                         BDINFO_FLAGS_MAXLEN_SHIFT),
7262                        NIC_SRAM_TX_BUFFER_DESC);
7263
7264         /* There is only one receive return ring on 5705/5750, no need
7265          * to explicitly disable the others.
7266          */
7267         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7268                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7269                      i += TG3_BDINFO_SIZE) {
7270                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7271                                       BDINFO_FLAGS_DISABLED);
7272                 }
7273         }
7274
7275         tp->rx_rcb_ptr = 0;
7276         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7277
7278         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7279                        tp->rx_rcb_mapping,
7280                        (TG3_RX_RCB_RING_SIZE(tp) <<
7281                         BDINFO_FLAGS_MAXLEN_SHIFT),
7282                        0);
7283
7284         tp->rx_std_ptr = tp->rx_pending;
7285         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7286                      tp->rx_std_ptr);
7287
7288         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7289                                                 tp->rx_jumbo_pending : 0;
7290         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7291                      tp->rx_jumbo_ptr);
7292
7293         /* Initialize MAC address and backoff seed. */
7294         __tg3_set_mac_addr(tp, 0);
7295
7296         /* MTU + ethernet header + FCS + optional VLAN tag */
7297         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7298
7299         /* The slot time is changed by tg3_setup_phy if we
7300          * run at gigabit with half duplex.
7301          */
7302         tw32(MAC_TX_LENGTHS,
7303              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7304              (6 << TX_LENGTHS_IPG_SHIFT) |
7305              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7306
7307         /* Receive rules. */
7308         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7309         tw32(RCVLPC_CONFIG, 0x0181);
7310
7311         /* Calculate RDMAC_MODE setting early, we need it to determine
7312          * the RCVLPC_STATE_ENABLE mask.
7313          */
7314         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7315                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7316                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7317                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7318                       RDMAC_MODE_LNGREAD_ENAB);
7319
7320         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7322                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7323                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7324                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7325
7326         /* If statement applies to 5705 and 5750 PCI devices only */
7327         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7328              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7329             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7330                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7331                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7332                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7333                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7334                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7335                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7336                 }
7337         }
7338
7339         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7340                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7341
7342         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7343                 rdmac_mode |= (1 << 27);
7344
7345         /* Receive/send statistics. */
7346         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7347                 val = tr32(RCVLPC_STATS_ENABLE);
7348                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7349                 tw32(RCVLPC_STATS_ENABLE, val);
7350         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7351                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7352                 val = tr32(RCVLPC_STATS_ENABLE);
7353                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7354                 tw32(RCVLPC_STATS_ENABLE, val);
7355         } else {
7356                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7357         }
7358         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7359         tw32(SNDDATAI_STATSENAB, 0xffffff);
7360         tw32(SNDDATAI_STATSCTRL,
7361              (SNDDATAI_SCTRL_ENABLE |
7362               SNDDATAI_SCTRL_FASTUPD));
7363
7364         /* Setup host coalescing engine. */
7365         tw32(HOSTCC_MODE, 0);
7366         for (i = 0; i < 2000; i++) {
7367                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7368                         break;
7369                 udelay(10);
7370         }
7371
7372         __tg3_set_coalesce(tp, &tp->coal);
7373
7374         /* set status block DMA address */
7375         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7376              ((u64) tp->status_mapping >> 32));
7377         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7378              ((u64) tp->status_mapping & 0xffffffff));
7379
7380         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7381                 /* Status/statistics block address.  See tg3_timer,
7382                  * the tg3_periodic_fetch_stats call there, and
7383                  * tg3_get_stats to see how this works for 5705/5750 chips.
7384                  */
7385                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7386                      ((u64) tp->stats_mapping >> 32));
7387                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7388                      ((u64) tp->stats_mapping & 0xffffffff));
7389                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7390                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7391         }
7392
7393         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7394
7395         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7396         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7397         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7398                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7399
7400         /* Clear statistics/status block in chip, and status block in ram. */
7401         for (i = NIC_SRAM_STATS_BLK;
7402              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7403              i += sizeof(u32)) {
7404                 tg3_write_mem(tp, i, 0);
7405                 udelay(40);
7406         }
7407         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7408
7409         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7410                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7411                 /* reset to prevent losing 1st rx packet intermittently */
7412                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7413                 udelay(10);
7414         }
7415
7416         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7417                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7418         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7419             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7420             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7421                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7422         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7423         udelay(40);
7424
7425         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7426          * If TG3_FLG2_IS_NIC is zero, we should read the
7427          * register to preserve the GPIO settings for LOMs. The GPIOs,
7428          * whether used as inputs or outputs, are set by boot code after
7429          * reset.
7430          */
7431         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7432                 u32 gpio_mask;
7433
7434                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7435                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7436                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7437
7438                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7439                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7440                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7441
7442                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7443                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7444
7445                 tp->grc_local_ctrl &= ~gpio_mask;
7446                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7447
7448                 /* GPIO1 must be driven high for eeprom write protect */
7449                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7450                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7451                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7452         }
7453         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7454         udelay(100);
7455
7456         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7457         tp->last_tag = 0;
7458
7459         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7460                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7461                 udelay(40);
7462         }
7463
7464         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7465                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7466                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7467                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7468                WDMAC_MODE_LNGREAD_ENAB);
7469
7470         /* If statement applies to 5705 and 5750 PCI devices only */
7471         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7472              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7474                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7475                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7476                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7477                         /* nothing */
7478                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7479                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7480                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7481                         val |= WDMAC_MODE_RX_ACCEL;
7482                 }
7483         }
7484
7485         /* Enable host coalescing bug fix */
7486         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7487             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7488             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7489             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7490             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7491                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7492
7493         tw32_f(WDMAC_MODE, val);
7494         udelay(40);
7495
7496         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7497                 u16 pcix_cmd;
7498
7499                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7500                                      &pcix_cmd);
7501                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7502                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7503                         pcix_cmd |= PCI_X_CMD_READ_2K;
7504                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7505                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7506                         pcix_cmd |= PCI_X_CMD_READ_2K;
7507                 }
7508                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7509                                       pcix_cmd);
7510         }
7511
7512         tw32_f(RDMAC_MODE, rdmac_mode);
7513         udelay(40);
7514
7515         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7516         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7517                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7518
7519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7520                 tw32(SNDDATAC_MODE,
7521                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7522         else
7523                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7524
7525         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7526         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7527         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7528         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7529         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7530                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7531         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7532         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7533
7534         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7535                 err = tg3_load_5701_a0_firmware_fix(tp);
7536                 if (err)
7537                         return err;
7538         }
7539
7540         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7541                 err = tg3_load_tso_firmware(tp);
7542                 if (err)
7543                         return err;
7544         }
7545
7546         tp->tx_mode = TX_MODE_ENABLE;
7547         tw32_f(MAC_TX_MODE, tp->tx_mode);
7548         udelay(100);
7549
7550         tp->rx_mode = RX_MODE_ENABLE;
7551         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7552             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7553             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7554             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7555                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7556
7557         tw32_f(MAC_RX_MODE, tp->rx_mode);
7558         udelay(10);
7559
7560         tw32(MAC_LED_CTRL, tp->led_ctrl);
7561
7562         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7563         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7564                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7565                 udelay(10);
7566         }
7567         tw32_f(MAC_RX_MODE, tp->rx_mode);
7568         udelay(10);
7569
7570         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7571                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7572                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7573                         /* Set drive transmission level to 1.2V  */
7574                         /* only if the signal pre-emphasis bit is not set  */
7575                         val = tr32(MAC_SERDES_CFG);
7576                         val &= 0xfffff000;
7577                         val |= 0x880;
7578                         tw32(MAC_SERDES_CFG, val);
7579                 }
7580                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7581                         tw32(MAC_SERDES_CFG, 0x616000);
7582         }
7583
7584         /* Prevent chip from dropping frames when flow control
7585          * is enabled.
7586          */
7587         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7588
7589         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7590             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7591                 /* Use hardware link auto-negotiation */
7592                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7593         }
7594
7595         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7596             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7597                 u32 tmp;
7598
7599                 tmp = tr32(SERDES_RX_CTRL);
7600                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7601                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7602                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7603                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7604         }
7605
7606         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7607                 if (tp->link_config.phy_is_low_power) {
7608                         tp->link_config.phy_is_low_power = 0;
7609                         tp->link_config.speed = tp->link_config.orig_speed;
7610                         tp->link_config.duplex = tp->link_config.orig_duplex;
7611                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7612                 }
7613
7614                 err = tg3_setup_phy(tp, 0);
7615                 if (err)
7616                         return err;
7617
7618                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7619                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7620                         u32 tmp;
7621
7622                         /* Clear CRC stats. */
7623                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7624                                 tg3_writephy(tp, MII_TG3_TEST1,
7625                                              tmp | MII_TG3_TEST1_CRC_EN);
7626                                 tg3_readphy(tp, 0x14, &tmp);
7627                         }
7628                 }
7629         }
7630
7631         __tg3_set_rx_mode(tp->dev);
7632
7633         /* Initialize receive rules. */
7634         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7635         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7636         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7637         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7638
7639         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7640             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7641                 limit = 8;
7642         else
7643                 limit = 16;
7644         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7645                 limit -= 4;
7646         switch (limit) {
7647         case 16:
7648                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7649         case 15:
7650                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7651         case 14:
7652                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7653         case 13:
7654                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7655         case 12:
7656                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7657         case 11:
7658                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7659         case 10:
7660                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7661         case 9:
7662                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7663         case 8:
7664                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7665         case 7:
7666                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7667         case 6:
7668                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7669         case 5:
7670                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7671         case 4:
7672                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7673         case 3:
7674                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7675         case 2:
7676         case 1:
7677
7678         default:
7679                 break;
7680         }
7681
7682         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7683                 /* Write our heartbeat update interval to APE. */
7684                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7685                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7686
7687         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7688
7689         return 0;
7690 }
7691
7692 /* Called at device open time to get the chip ready for
7693  * packet processing.  Invoked with tp->lock held.
7694  */
7695 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7696 {
7697         int err;
7698
7699         /* Force the chip into D0. */
7700         err = tg3_set_power_state(tp, PCI_D0);
7701         if (err)
7702                 goto out;
7703
7704         tg3_switch_clocks(tp);
7705
7706         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7707
7708         err = tg3_reset_hw(tp, reset_phy);
7709
7710 out:
7711         return err;
7712 }
7713
7714 #define TG3_STAT_ADD32(PSTAT, REG) \
7715 do {    u32 __val = tr32(REG); \
7716         (PSTAT)->low += __val; \
7717         if ((PSTAT)->low < __val) \
7718                 (PSTAT)->high += 1; \
7719 } while (0)
7720
7721 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7722 {
7723         struct tg3_hw_stats *sp = tp->hw_stats;
7724
7725         if (!netif_carrier_ok(tp->dev))
7726                 return;
7727
7728         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7729         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7730         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7731         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7732         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7733         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7734         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7735         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7736         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7737         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7738         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7739         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7740         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7741
7742         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7743         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7744         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7745         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7746         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7747         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7748         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7749         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7750         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7751         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7752         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7753         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7754         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7755         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7756
7757         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7758         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7759         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7760 }
7761
7762 static void tg3_timer(unsigned long __opaque)
7763 {
7764         struct tg3 *tp = (struct tg3 *) __opaque;
7765
7766         if (tp->irq_sync)
7767                 goto restart_timer;
7768
7769         spin_lock(&tp->lock);
7770
7771         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7772                 /* All of this garbage is because when using non-tagged
7773                  * IRQ status the mailbox/status_block protocol the chip
7774                  * uses with the cpu is race prone.
7775                  */
7776                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7777                         tw32(GRC_LOCAL_CTRL,
7778                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7779                 } else {
7780                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7781                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7782                 }
7783
7784                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7785                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7786                         spin_unlock(&tp->lock);
7787                         schedule_work(&tp->reset_task);
7788                         return;
7789                 }
7790         }
7791
7792         /* This part only runs once per second. */
7793         if (!--tp->timer_counter) {
7794                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7795                         tg3_periodic_fetch_stats(tp);
7796
7797                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7798                         u32 mac_stat;
7799                         int phy_event;
7800
7801                         mac_stat = tr32(MAC_STATUS);
7802
7803                         phy_event = 0;
7804                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7805                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7806                                         phy_event = 1;
7807                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7808                                 phy_event = 1;
7809
7810                         if (phy_event)
7811                                 tg3_setup_phy(tp, 0);
7812                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7813                         u32 mac_stat = tr32(MAC_STATUS);
7814                         int need_setup = 0;
7815
7816                         if (netif_carrier_ok(tp->dev) &&
7817                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7818                                 need_setup = 1;
7819                         }
7820                         if (! netif_carrier_ok(tp->dev) &&
7821                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
7822                                          MAC_STATUS_SIGNAL_DET))) {
7823                                 need_setup = 1;
7824                         }
7825                         if (need_setup) {
7826                                 if (!tp->serdes_counter) {
7827                                         tw32_f(MAC_MODE,
7828                                              (tp->mac_mode &
7829                                               ~MAC_MODE_PORT_MODE_MASK));
7830                                         udelay(40);
7831                                         tw32_f(MAC_MODE, tp->mac_mode);
7832                                         udelay(40);
7833                                 }
7834                                 tg3_setup_phy(tp, 0);
7835                         }
7836                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7837                         tg3_serdes_parallel_detect(tp);
7838
7839                 tp->timer_counter = tp->timer_multiplier;
7840         }
7841
7842         /* Heartbeat is only sent once every 2 seconds.
7843          *
7844          * The heartbeat is to tell the ASF firmware that the host
7845          * driver is still alive.  In the event that the OS crashes,
7846          * ASF needs to reset the hardware to free up the FIFO space
7847          * that may be filled with rx packets destined for the host.
7848          * If the FIFO is full, ASF will no longer function properly.
7849          *
7850          * Unintended resets have been reported on real time kernels
7851          * where the timer doesn't run on time.  Netpoll will also have
7852          * same problem.
7853          *
7854          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7855          * to check the ring condition when the heartbeat is expiring
7856          * before doing the reset.  This will prevent most unintended
7857          * resets.
7858          */
7859         if (!--tp->asf_counter) {
7860                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7861                         u32 val;
7862
7863                         tg3_wait_for_event_ack(tp);
7864
7865                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7866                                       FWCMD_NICDRV_ALIVE3);
7867                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7868                         /* 5 seconds timeout */
7869                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7870                         val = tr32(GRC_RX_CPU_EVENT);
7871                         val |= GRC_RX_CPU_DRIVER_EVENT;
7872                         tw32_f(GRC_RX_CPU_EVENT, val);
7873                 }
7874                 tp->asf_counter = tp->asf_multiplier;
7875         }
7876
7877         spin_unlock(&tp->lock);
7878
7879 restart_timer:
7880         tp->timer.expires = jiffies + tp->timer_offset;
7881         add_timer(&tp->timer);
7882 }
7883
7884 static int tg3_request_irq(struct tg3 *tp)
7885 {
7886         irq_handler_t fn;
7887         unsigned long flags;
7888         struct net_device *dev = tp->dev;
7889
7890         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7891                 fn = tg3_msi;
7892                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7893                         fn = tg3_msi_1shot;
7894                 flags = IRQF_SAMPLE_RANDOM;
7895         } else {
7896                 fn = tg3_interrupt;
7897                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7898                         fn = tg3_interrupt_tagged;
7899                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7900         }
7901         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7902 }
7903
7904 static int tg3_test_interrupt(struct tg3 *tp)
7905 {
7906         struct net_device *dev = tp->dev;
7907         int err, i, intr_ok = 0;
7908
7909         if (!netif_running(dev))
7910                 return -ENODEV;
7911
7912         tg3_disable_ints(tp);
7913
7914         free_irq(tp->pdev->irq, dev);
7915
7916         err = request_irq(tp->pdev->irq, tg3_test_isr,
7917                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7918         if (err)
7919                 return err;
7920
7921         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7922         tg3_enable_ints(tp);
7923
7924         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7925                HOSTCC_MODE_NOW);
7926
7927         for (i = 0; i < 5; i++) {
7928                 u32 int_mbox, misc_host_ctrl;
7929
7930                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7931                                         TG3_64BIT_REG_LOW);
7932                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7933
7934                 if ((int_mbox != 0) ||
7935                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7936                         intr_ok = 1;
7937                         break;
7938                 }
7939
7940                 msleep(10);
7941         }
7942
7943         tg3_disable_ints(tp);
7944
7945         free_irq(tp->pdev->irq, dev);
7946
7947         err = tg3_request_irq(tp);
7948
7949         if (err)
7950                 return err;
7951
7952         if (intr_ok)
7953                 return 0;
7954
7955         return -EIO;
7956 }
7957
7958 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7959  * successfully restored
7960  */
7961 static int tg3_test_msi(struct tg3 *tp)
7962 {
7963         struct net_device *dev = tp->dev;
7964         int err;
7965         u16 pci_cmd;
7966
7967         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7968                 return 0;
7969
7970         /* Turn off SERR reporting in case MSI terminates with Master
7971          * Abort.
7972          */
7973         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7974         pci_write_config_word(tp->pdev, PCI_COMMAND,
7975                               pci_cmd & ~PCI_COMMAND_SERR);
7976
7977         err = tg3_test_interrupt(tp);
7978
7979         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7980
7981         if (!err)
7982                 return 0;
7983
7984         /* other failures */
7985         if (err != -EIO)
7986                 return err;
7987
7988         /* MSI test failed, go back to INTx mode */
7989         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7990                "switching to INTx mode. Please report this failure to "
7991                "the PCI maintainer and include system chipset information.\n",
7992                        tp->dev->name);
7993
7994         free_irq(tp->pdev->irq, dev);
7995         pci_disable_msi(tp->pdev);
7996
7997         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7998
7999         err = tg3_request_irq(tp);
8000         if (err)
8001                 return err;
8002
8003         /* Need to reset the chip because the MSI cycle may have terminated
8004          * with Master Abort.
8005          */
8006         tg3_full_lock(tp, 1);
8007
8008         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8009         err = tg3_init_hw(tp, 1);
8010
8011         tg3_full_unlock(tp);
8012
8013         if (err)
8014                 free_irq(tp->pdev->irq, dev);
8015
8016         return err;
8017 }
8018
8019 static int tg3_open(struct net_device *dev)
8020 {
8021         struct tg3 *tp = netdev_priv(dev);
8022         int err;
8023
8024         netif_carrier_off(tp->dev);
8025
8026         tg3_full_lock(tp, 0);
8027
8028         err = tg3_set_power_state(tp, PCI_D0);
8029         if (err) {
8030                 tg3_full_unlock(tp);
8031                 return err;
8032         }
8033
8034         tg3_disable_ints(tp);
8035         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8036
8037         tg3_full_unlock(tp);
8038
8039         /* The placement of this call is tied
8040          * to the setup and use of Host TX descriptors.
8041          */
8042         err = tg3_alloc_consistent(tp);
8043         if (err)
8044                 return err;
8045
8046         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8047                 /* All MSI supporting chips should support tagged
8048                  * status.  Assert that this is the case.
8049                  */
8050                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8051                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8052                                "Not using MSI.\n", tp->dev->name);
8053                 } else if (pci_enable_msi(tp->pdev) == 0) {
8054                         u32 msi_mode;
8055
8056                         msi_mode = tr32(MSGINT_MODE);
8057                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8058                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8059                 }
8060         }
8061         err = tg3_request_irq(tp);
8062
8063         if (err) {
8064                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8065                         pci_disable_msi(tp->pdev);
8066                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8067                 }
8068                 tg3_free_consistent(tp);
8069                 return err;
8070         }
8071
8072         napi_enable(&tp->napi);
8073
8074         tg3_full_lock(tp, 0);
8075
8076         err = tg3_init_hw(tp, 1);
8077         if (err) {
8078                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8079                 tg3_free_rings(tp);
8080         } else {
8081                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8082                         tp->timer_offset = HZ;
8083                 else
8084                         tp->timer_offset = HZ / 10;
8085
8086                 BUG_ON(tp->timer_offset > HZ);
8087                 tp->timer_counter = tp->timer_multiplier =
8088                         (HZ / tp->timer_offset);
8089                 tp->asf_counter = tp->asf_multiplier =
8090                         ((HZ / tp->timer_offset) * 2);
8091
8092                 init_timer(&tp->timer);
8093                 tp->timer.expires = jiffies + tp->timer_offset;
8094                 tp->timer.data = (unsigned long) tp;
8095                 tp->timer.function = tg3_timer;
8096         }
8097
8098         tg3_full_unlock(tp);
8099
8100         if (err) {
8101                 napi_disable(&tp->napi);
8102                 free_irq(tp->pdev->irq, dev);
8103                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8104                         pci_disable_msi(tp->pdev);
8105                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8106                 }
8107                 tg3_free_consistent(tp);
8108                 return err;
8109         }
8110
8111         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8112                 err = tg3_test_msi(tp);
8113
8114                 if (err) {
8115                         tg3_full_lock(tp, 0);
8116
8117                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8118                                 pci_disable_msi(tp->pdev);
8119                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8120                         }
8121                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8122                         tg3_free_rings(tp);
8123                         tg3_free_consistent(tp);
8124
8125                         tg3_full_unlock(tp);
8126
8127                         napi_disable(&tp->napi);
8128
8129                         return err;
8130                 }
8131
8132                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8133                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8134                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
8135
8136                                 tw32(PCIE_TRANSACTION_CFG,
8137                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
8138                         }
8139                 }
8140         }
8141
8142         tg3_phy_start(tp);
8143
8144         tg3_full_lock(tp, 0);
8145
8146         add_timer(&tp->timer);
8147         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8148         tg3_enable_ints(tp);
8149
8150         tg3_full_unlock(tp);
8151
8152         netif_start_queue(dev);
8153
8154         return 0;
8155 }
8156
8157 #if 0
8158 /*static*/ void tg3_dump_state(struct tg3 *tp)
8159 {
8160         u32 val32, val32_2, val32_3, val32_4, val32_5;
8161         u16 val16;
8162         int i;
8163
8164         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8165         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8166         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8167                val16, val32);
8168
8169         /* MAC block */
8170         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8171                tr32(MAC_MODE), tr32(MAC_STATUS));
8172         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8173                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8174         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8175                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8176         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8177                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8178
8179         /* Send data initiator control block */
8180         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8181                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8182         printk("       SNDDATAI_STATSCTRL[%08x]\n",
8183                tr32(SNDDATAI_STATSCTRL));
8184
8185         /* Send data completion control block */
8186         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8187
8188         /* Send BD ring selector block */
8189         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8190                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8191
8192         /* Send BD initiator control block */
8193         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8194                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8195
8196         /* Send BD completion control block */
8197         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8198
8199         /* Receive list placement control block */
8200         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8201                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8202         printk("       RCVLPC_STATSCTRL[%08x]\n",
8203                tr32(RCVLPC_STATSCTRL));
8204
8205         /* Receive data and receive BD initiator control block */
8206         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8207                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8208
8209         /* Receive data completion control block */
8210         printk("DEBUG: RCVDCC_MODE[%08x]\n",
8211                tr32(RCVDCC_MODE));
8212
8213         /* Receive BD initiator control block */
8214         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8215                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8216
8217         /* Receive BD completion control block */
8218         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8219                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8220
8221         /* Receive list selector control block */
8222         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8223                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8224
8225         /* Mbuf cluster free block */
8226         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8227                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8228
8229         /* Host coalescing control block */
8230         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8231                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8232         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8233                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8234                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8235         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8236                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8237                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8238         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8239                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8240         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8241                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8242
8243         /* Memory arbiter control block */
8244         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8245                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8246
8247         /* Buffer manager control block */
8248         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8249                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8250         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8251                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8252         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8253                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8254                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8255                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8256
8257         /* Read DMA control block */
8258         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8259                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8260
8261         /* Write DMA control block */
8262         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8263                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8264
8265         /* DMA completion block */
8266         printk("DEBUG: DMAC_MODE[%08x]\n",
8267                tr32(DMAC_MODE));
8268
8269         /* GRC block */
8270         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8271                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8272         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8273                tr32(GRC_LOCAL_CTRL));
8274
8275         /* TG3_BDINFOs */
8276         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8277                tr32(RCVDBDI_JUMBO_BD + 0x0),
8278                tr32(RCVDBDI_JUMBO_BD + 0x4),
8279                tr32(RCVDBDI_JUMBO_BD + 0x8),
8280                tr32(RCVDBDI_JUMBO_BD + 0xc));
8281         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8282                tr32(RCVDBDI_STD_BD + 0x0),
8283                tr32(RCVDBDI_STD_BD + 0x4),
8284                tr32(RCVDBDI_STD_BD + 0x8),
8285                tr32(RCVDBDI_STD_BD + 0xc));
8286         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8287                tr32(RCVDBDI_MINI_BD + 0x0),
8288                tr32(RCVDBDI_MINI_BD + 0x4),
8289                tr32(RCVDBDI_MINI_BD + 0x8),
8290                tr32(RCVDBDI_MINI_BD + 0xc));
8291
8292         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8293         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8294         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8295         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8296         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8297                val32, val32_2, val32_3, val32_4);
8298
8299         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8300         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8301         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8302         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8303         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8304                val32, val32_2, val32_3, val32_4);
8305
8306         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8307         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8308         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8309         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8310         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8311         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8312                val32, val32_2, val32_3, val32_4, val32_5);
8313
8314         /* SW status block */
8315         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8316                tp->hw_status->status,
8317                tp->hw_status->status_tag,
8318                tp->hw_status->rx_jumbo_consumer,
8319                tp->hw_status->rx_consumer,
8320                tp->hw_status->rx_mini_consumer,
8321                tp->hw_status->idx[0].rx_producer,
8322                tp->hw_status->idx[0].tx_consumer);
8323
8324         /* SW statistics block */
8325         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8326                ((u32 *)tp->hw_stats)[0],
8327                ((u32 *)tp->hw_stats)[1],
8328                ((u32 *)tp->hw_stats)[2],
8329                ((u32 *)tp->hw_stats)[3]);
8330
8331         /* Mailboxes */
8332         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8333                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8334                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8335                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8336                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8337
8338         /* NIC side send descriptors. */
8339         for (i = 0; i < 6; i++) {
8340                 unsigned long txd;
8341
8342                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8343                         + (i * sizeof(struct tg3_tx_buffer_desc));
8344                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8345                        i,
8346                        readl(txd + 0x0), readl(txd + 0x4),
8347                        readl(txd + 0x8), readl(txd + 0xc));
8348         }
8349
8350         /* NIC side RX descriptors. */
8351         for (i = 0; i < 6; i++) {
8352                 unsigned long rxd;
8353
8354                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8355                         + (i * sizeof(struct tg3_rx_buffer_desc));
8356                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8357                        i,
8358                        readl(rxd + 0x0), readl(rxd + 0x4),
8359                        readl(rxd + 0x8), readl(rxd + 0xc));
8360                 rxd += (4 * sizeof(u32));
8361                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8362                        i,
8363                        readl(rxd + 0x0), readl(rxd + 0x4),
8364                        readl(rxd + 0x8), readl(rxd + 0xc));
8365         }
8366
8367         for (i = 0; i < 6; i++) {
8368                 unsigned long rxd;
8369
8370                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8371                         + (i * sizeof(struct tg3_rx_buffer_desc));
8372                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8373                        i,
8374                        readl(rxd + 0x0), readl(rxd + 0x4),
8375                        readl(rxd + 0x8), readl(rxd + 0xc));
8376                 rxd += (4 * sizeof(u32));
8377                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8378                        i,
8379                        readl(rxd + 0x0), readl(rxd + 0x4),
8380                        readl(rxd + 0x8), readl(rxd + 0xc));
8381         }
8382 }
8383 #endif
8384
8385 static struct net_device_stats *tg3_get_stats(struct net_device *);
8386 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8387
8388 static int tg3_close(struct net_device *dev)
8389 {
8390         struct tg3 *tp = netdev_priv(dev);
8391
8392         napi_disable(&tp->napi);
8393         cancel_work_sync(&tp->reset_task);
8394
8395         netif_stop_queue(dev);
8396
8397         del_timer_sync(&tp->timer);
8398
8399         tg3_full_lock(tp, 1);
8400 #if 0
8401         tg3_dump_state(tp);
8402 #endif
8403
8404         tg3_disable_ints(tp);
8405
8406         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8407         tg3_free_rings(tp);
8408         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8409
8410         tg3_full_unlock(tp);
8411
8412         free_irq(tp->pdev->irq, dev);
8413         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8414                 pci_disable_msi(tp->pdev);
8415                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8416         }
8417
8418         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8419                sizeof(tp->net_stats_prev));
8420         memcpy(&tp->estats_prev, tg3_get_estats(tp),
8421                sizeof(tp->estats_prev));
8422
8423         tg3_free_consistent(tp);
8424
8425         tg3_set_power_state(tp, PCI_D3hot);
8426
8427         netif_carrier_off(tp->dev);
8428
8429         return 0;
8430 }
8431
8432 static inline unsigned long get_stat64(tg3_stat64_t *val)
8433 {
8434         unsigned long ret;
8435
8436 #if (BITS_PER_LONG == 32)
8437         ret = val->low;
8438 #else
8439         ret = ((u64)val->high << 32) | ((u64)val->low);
8440 #endif
8441         return ret;
8442 }
8443
8444 static unsigned long calc_crc_errors(struct tg3 *tp)
8445 {
8446         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8447
8448         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8449             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8450              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8451                 u32 val;
8452
8453                 spin_lock_bh(&tp->lock);
8454                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8455                         tg3_writephy(tp, MII_TG3_TEST1,
8456                                      val | MII_TG3_TEST1_CRC_EN);
8457                         tg3_readphy(tp, 0x14, &val);
8458                 } else
8459                         val = 0;
8460                 spin_unlock_bh(&tp->lock);
8461
8462                 tp->phy_crc_errors += val;
8463
8464                 return tp->phy_crc_errors;
8465         }
8466
8467         return get_stat64(&hw_stats->rx_fcs_errors);
8468 }
8469
8470 #define ESTAT_ADD(member) \
8471         estats->member =        old_estats->member + \
8472                                 get_stat64(&hw_stats->member)
8473
8474 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8475 {
8476         struct tg3_ethtool_stats *estats = &tp->estats;
8477         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8478         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8479
8480         if (!hw_stats)
8481                 return old_estats;
8482
8483         ESTAT_ADD(rx_octets);
8484         ESTAT_ADD(rx_fragments);
8485         ESTAT_ADD(rx_ucast_packets);
8486         ESTAT_ADD(rx_mcast_packets);
8487         ESTAT_ADD(rx_bcast_packets);
8488         ESTAT_ADD(rx_fcs_errors);
8489         ESTAT_ADD(rx_align_errors);
8490         ESTAT_ADD(rx_xon_pause_rcvd);
8491         ESTAT_ADD(rx_xoff_pause_rcvd);
8492         ESTAT_ADD(rx_mac_ctrl_rcvd);
8493         ESTAT_ADD(rx_xoff_entered);
8494         ESTAT_ADD(rx_frame_too_long_errors);
8495         ESTAT_ADD(rx_jabbers);
8496         ESTAT_ADD(rx_undersize_packets);
8497         ESTAT_ADD(rx_in_length_errors);
8498         ESTAT_ADD(rx_out_length_errors);
8499         ESTAT_ADD(rx_64_or_less_octet_packets);
8500         ESTAT_ADD(rx_65_to_127_octet_packets);
8501         ESTAT_ADD(rx_128_to_255_octet_packets);
8502         ESTAT_ADD(rx_256_to_511_octet_packets);
8503         ESTAT_ADD(rx_512_to_1023_octet_packets);
8504         ESTAT_ADD(rx_1024_to_1522_octet_packets);
8505         ESTAT_ADD(rx_1523_to_2047_octet_packets);
8506         ESTAT_ADD(rx_2048_to_4095_octet_packets);
8507         ESTAT_ADD(rx_4096_to_8191_octet_packets);
8508         ESTAT_ADD(rx_8192_to_9022_octet_packets);
8509
8510         ESTAT_ADD(tx_octets);
8511         ESTAT_ADD(tx_collisions);
8512         ESTAT_ADD(tx_xon_sent);
8513         ESTAT_ADD(tx_xoff_sent);
8514         ESTAT_ADD(tx_flow_control);
8515         ESTAT_ADD(tx_mac_errors);
8516         ESTAT_ADD(tx_single_collisions);
8517         ESTAT_ADD(tx_mult_collisions);
8518         ESTAT_ADD(tx_deferred);
8519         ESTAT_ADD(tx_excessive_collisions);
8520         ESTAT_ADD(tx_late_collisions);
8521         ESTAT_ADD(tx_collide_2times);
8522         ESTAT_ADD(tx_collide_3times);
8523         ESTAT_ADD(tx_collide_4times);
8524         ESTAT_ADD(tx_collide_5times);
8525         ESTAT_ADD(tx_collide_6times);
8526         ESTAT_ADD(tx_collide_7times);
8527         ESTAT_ADD(tx_collide_8times);
8528         ESTAT_ADD(tx_collide_9times);
8529         ESTAT_ADD(tx_collide_10times);
8530         ESTAT_ADD(tx_collide_11times);
8531         ESTAT_ADD(tx_collide_12times);
8532         ESTAT_ADD(tx_collide_13times);
8533         ESTAT_ADD(tx_collide_14times);
8534         ESTAT_ADD(tx_collide_15times);
8535         ESTAT_ADD(tx_ucast_packets);
8536         ESTAT_ADD(tx_mcast_packets);
8537         ESTAT_ADD(tx_bcast_packets);
8538         ESTAT_ADD(tx_carrier_sense_errors);
8539         ESTAT_ADD(tx_discards);
8540         ESTAT_ADD(tx_errors);
8541
8542         ESTAT_ADD(dma_writeq_full);
8543         ESTAT_ADD(dma_write_prioq_full);
8544         ESTAT_ADD(rxbds_empty);
8545         ESTAT_ADD(rx_discards);
8546         ESTAT_ADD(rx_errors);
8547         ESTAT_ADD(rx_threshold_hit);
8548
8549         ESTAT_ADD(dma_readq_full);
8550         ESTAT_ADD(dma_read_prioq_full);
8551         ESTAT_ADD(tx_comp_queue_full);
8552
8553         ESTAT_ADD(ring_set_send_prod_index);
8554         ESTAT_ADD(ring_status_update);
8555         ESTAT_ADD(nic_irqs);
8556         ESTAT_ADD(nic_avoided_irqs);
8557         ESTAT_ADD(nic_tx_threshold_hit);
8558
8559         return estats;
8560 }
8561
8562 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8563 {
8564         struct tg3 *tp = netdev_priv(dev);
8565         struct net_device_stats *stats = &tp->net_stats;
8566         struct net_device_stats *old_stats = &tp->net_stats_prev;
8567         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8568
8569         if (!hw_stats)
8570                 return old_stats;
8571
8572         stats->rx_packets = old_stats->rx_packets +
8573                 get_stat64(&hw_stats->rx_ucast_packets) +
8574                 get_stat64(&hw_stats->rx_mcast_packets) +
8575                 get_stat64(&hw_stats->rx_bcast_packets);
8576
8577         stats->tx_packets = old_stats->tx_packets +
8578                 get_stat64(&hw_stats->tx_ucast_packets) +
8579                 get_stat64(&hw_stats->tx_mcast_packets) +
8580                 get_stat64(&hw_stats->tx_bcast_packets);
8581
8582         stats->rx_bytes = old_stats->rx_bytes +
8583                 get_stat64(&hw_stats->rx_octets);
8584         stats->tx_bytes = old_stats->tx_bytes +
8585                 get_stat64(&hw_stats->tx_octets);
8586
8587         stats->rx_errors = old_stats->rx_errors +
8588                 get_stat64(&hw_stats->rx_errors);
8589         stats->tx_errors = old_stats->tx_errors +
8590                 get_stat64(&hw_stats->tx_errors) +
8591                 get_stat64(&hw_stats->tx_mac_errors) +
8592                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8593                 get_stat64(&hw_stats->tx_discards);
8594
8595         stats->multicast = old_stats->multicast +
8596                 get_stat64(&hw_stats->rx_mcast_packets);
8597         stats->collisions = old_stats->collisions +
8598                 get_stat64(&hw_stats->tx_collisions);
8599
8600         stats->rx_length_errors = old_stats->rx_length_errors +
8601                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8602                 get_stat64(&hw_stats->rx_undersize_packets);
8603
8604         stats->rx_over_errors = old_stats->rx_over_errors +
8605                 get_stat64(&hw_stats->rxbds_empty);
8606         stats->rx_frame_errors = old_stats->rx_frame_errors +
8607                 get_stat64(&hw_stats->rx_align_errors);
8608         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8609                 get_stat64(&hw_stats->tx_discards);
8610         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8611                 get_stat64(&hw_stats->tx_carrier_sense_errors);
8612
8613         stats->rx_crc_errors = old_stats->rx_crc_errors +
8614                 calc_crc_errors(tp);
8615
8616         stats->rx_missed_errors = old_stats->rx_missed_errors +
8617                 get_stat64(&hw_stats->rx_discards);
8618
8619         return stats;
8620 }
8621
8622 static inline u32 calc_crc(unsigned char *buf, int len)
8623 {
8624         u32 reg;
8625         u32 tmp;
8626         int j, k;
8627
8628         reg = 0xffffffff;
8629
8630         for (j = 0; j < len; j++) {
8631                 reg ^= buf[j];
8632
8633                 for (k = 0; k < 8; k++) {
8634                         tmp = reg & 0x01;
8635
8636                         reg >>= 1;
8637
8638                         if (tmp) {
8639                                 reg ^= 0xedb88320;
8640                         }
8641                 }
8642         }
8643
8644         return ~reg;
8645 }
8646
8647 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8648 {
8649         /* accept or reject all multicast frames */
8650         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8651         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8652         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8653         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8654 }
8655
8656 static void __tg3_set_rx_mode(struct net_device *dev)
8657 {
8658         struct tg3 *tp = netdev_priv(dev);
8659         u32 rx_mode;
8660
8661         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8662                                   RX_MODE_KEEP_VLAN_TAG);
8663
8664         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8665          * flag clear.
8666          */
8667 #if TG3_VLAN_TAG_USED
8668         if (!tp->vlgrp &&
8669             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8670                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8671 #else
8672         /* By definition, VLAN is disabled always in this
8673          * case.
8674          */
8675         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8676                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8677 #endif
8678
8679         if (dev->flags & IFF_PROMISC) {
8680                 /* Promiscuous mode. */
8681                 rx_mode |= RX_MODE_PROMISC;
8682         } else if (dev->flags & IFF_ALLMULTI) {
8683                 /* Accept all multicast. */
8684                 tg3_set_multi (tp, 1);
8685         } else if (dev->mc_count < 1) {
8686                 /* Reject all multicast. */
8687                 tg3_set_multi (tp, 0);
8688         } else {
8689                 /* Accept one or more multicast(s). */
8690                 struct dev_mc_list *mclist;
8691                 unsigned int i;
8692                 u32 mc_filter[4] = { 0, };
8693                 u32 regidx;
8694                 u32 bit;
8695                 u32 crc;
8696
8697                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8698                      i++, mclist = mclist->next) {
8699
8700                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8701                         bit = ~crc & 0x7f;
8702                         regidx = (bit & 0x60) >> 5;
8703                         bit &= 0x1f;
8704                         mc_filter[regidx] |= (1 << bit);
8705                 }
8706
8707                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8708                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8709                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8710                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8711         }
8712
8713         if (rx_mode != tp->rx_mode) {
8714                 tp->rx_mode = rx_mode;
8715                 tw32_f(MAC_RX_MODE, rx_mode);
8716                 udelay(10);
8717         }
8718 }
8719
8720 static void tg3_set_rx_mode(struct net_device *dev)
8721 {
8722         struct tg3 *tp = netdev_priv(dev);
8723
8724         if (!netif_running(dev))
8725                 return;
8726
8727         tg3_full_lock(tp, 0);
8728         __tg3_set_rx_mode(dev);
8729         tg3_full_unlock(tp);
8730 }
8731
8732 #define TG3_REGDUMP_LEN         (32 * 1024)
8733
8734 static int tg3_get_regs_len(struct net_device *dev)
8735 {
8736         return TG3_REGDUMP_LEN;
8737 }
8738
8739 static void tg3_get_regs(struct net_device *dev,
8740                 struct ethtool_regs *regs, void *_p)
8741 {
8742         u32 *p = _p;
8743         struct tg3 *tp = netdev_priv(dev);
8744         u8 *orig_p = _p;
8745         int i;
8746
8747         regs->version = 0;
8748
8749         memset(p, 0, TG3_REGDUMP_LEN);
8750
8751         if (tp->link_config.phy_is_low_power)
8752                 return;
8753
8754         tg3_full_lock(tp, 0);
8755
8756 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8757 #define GET_REG32_LOOP(base,len)                \
8758 do {    p = (u32 *)(orig_p + (base));           \
8759         for (i = 0; i < len; i += 4)            \
8760                 __GET_REG32((base) + i);        \
8761 } while (0)
8762 #define GET_REG32_1(reg)                        \
8763 do {    p = (u32 *)(orig_p + (reg));            \
8764         __GET_REG32((reg));                     \
8765 } while (0)
8766
8767         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8768         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8769         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8770         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8771         GET_REG32_1(SNDDATAC_MODE);
8772         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8773         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8774         GET_REG32_1(SNDBDC_MODE);
8775         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8776         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8777         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8778         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8779         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8780         GET_REG32_1(RCVDCC_MODE);
8781         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8782         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8783         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8784         GET_REG32_1(MBFREE_MODE);
8785         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8786         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8787         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8788         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8789         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8790         GET_REG32_1(RX_CPU_MODE);
8791         GET_REG32_1(RX_CPU_STATE);
8792         GET_REG32_1(RX_CPU_PGMCTR);
8793         GET_REG32_1(RX_CPU_HWBKPT);
8794         GET_REG32_1(TX_CPU_MODE);
8795         GET_REG32_1(TX_CPU_STATE);
8796         GET_REG32_1(TX_CPU_PGMCTR);
8797         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8798         GET_REG32_LOOP(FTQ_RESET, 0x120);
8799         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8800         GET_REG32_1(DMAC_MODE);
8801         GET_REG32_LOOP(GRC_MODE, 0x4c);
8802         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8803                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8804
8805 #undef __GET_REG32
8806 #undef GET_REG32_LOOP
8807 #undef GET_REG32_1
8808
8809         tg3_full_unlock(tp);
8810 }
8811
8812 static int tg3_get_eeprom_len(struct net_device *dev)
8813 {
8814         struct tg3 *tp = netdev_priv(dev);
8815
8816         return tp->nvram_size;
8817 }
8818
8819 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8820 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8821 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8822
8823 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8824 {
8825         struct tg3 *tp = netdev_priv(dev);
8826         int ret;
8827         u8  *pd;
8828         u32 i, offset, len, b_offset, b_count;
8829         __le32 val;
8830
8831         if (tp->link_config.phy_is_low_power)
8832                 return -EAGAIN;
8833
8834         offset = eeprom->offset;
8835         len = eeprom->len;
8836         eeprom->len = 0;
8837
8838         eeprom->magic = TG3_EEPROM_MAGIC;
8839
8840         if (offset & 3) {
8841                 /* adjustments to start on required 4 byte boundary */
8842                 b_offset = offset & 3;
8843                 b_count = 4 - b_offset;
8844                 if (b_count > len) {
8845                         /* i.e. offset=1 len=2 */
8846                         b_count = len;
8847                 }
8848                 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8849                 if (ret)
8850                         return ret;
8851                 memcpy(data, ((char*)&val) + b_offset, b_count);
8852                 len -= b_count;
8853                 offset += b_count;
8854                 eeprom->len += b_count;
8855         }
8856
8857         /* read bytes upto the last 4 byte boundary */
8858         pd = &data[eeprom->len];
8859         for (i = 0; i < (len - (len & 3)); i += 4) {
8860                 ret = tg3_nvram_read_le(tp, offset + i, &val);
8861                 if (ret) {
8862                         eeprom->len += i;
8863                         return ret;
8864                 }
8865                 memcpy(pd + i, &val, 4);
8866         }
8867         eeprom->len += i;
8868
8869         if (len & 3) {
8870                 /* read last bytes not ending on 4 byte boundary */
8871                 pd = &data[eeprom->len];
8872                 b_count = len & 3;
8873                 b_offset = offset + len - b_count;
8874                 ret = tg3_nvram_read_le(tp, b_offset, &val);
8875                 if (ret)
8876                         return ret;
8877                 memcpy(pd, &val, b_count);
8878                 eeprom->len += b_count;
8879         }
8880         return 0;
8881 }
8882
8883 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8884
8885 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8886 {
8887         struct tg3 *tp = netdev_priv(dev);
8888         int ret;
8889         u32 offset, len, b_offset, odd_len;
8890         u8 *buf;
8891         __le32 start, end;
8892
8893         if (tp->link_config.phy_is_low_power)
8894                 return -EAGAIN;
8895
8896         if (eeprom->magic != TG3_EEPROM_MAGIC)
8897                 return -EINVAL;
8898
8899         offset = eeprom->offset;
8900         len = eeprom->len;
8901
8902         if ((b_offset = (offset & 3))) {
8903                 /* adjustments to start on required 4 byte boundary */
8904                 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8905                 if (ret)
8906                         return ret;
8907                 len += b_offset;
8908                 offset &= ~3;
8909                 if (len < 4)
8910                         len = 4;
8911         }
8912
8913         odd_len = 0;
8914         if (len & 3) {
8915                 /* adjustments to end on required 4 byte boundary */
8916                 odd_len = 1;
8917                 len = (len + 3) & ~3;
8918                 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8919                 if (ret)
8920                         return ret;
8921         }
8922
8923         buf = data;
8924         if (b_offset || odd_len) {
8925                 buf = kmalloc(len, GFP_KERNEL);
8926                 if (!buf)
8927                         return -ENOMEM;
8928                 if (b_offset)
8929                         memcpy(buf, &start, 4);
8930                 if (odd_len)
8931                         memcpy(buf+len-4, &end, 4);
8932                 memcpy(buf + b_offset, data, eeprom->len);
8933         }
8934
8935         ret = tg3_nvram_write_block(tp, offset, len, buf);
8936
8937         if (buf != data)
8938                 kfree(buf);
8939
8940         return ret;
8941 }
8942
8943 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8944 {
8945         struct tg3 *tp = netdev_priv(dev);
8946
8947         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8948                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8949                         return -EAGAIN;
8950                 return phy_ethtool_gset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8951         }
8952
8953         cmd->supported = (SUPPORTED_Autoneg);
8954
8955         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8956                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8957                                    SUPPORTED_1000baseT_Full);
8958
8959         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8960                 cmd->supported |= (SUPPORTED_100baseT_Half |
8961                                   SUPPORTED_100baseT_Full |
8962                                   SUPPORTED_10baseT_Half |
8963                                   SUPPORTED_10baseT_Full |
8964                                   SUPPORTED_TP);
8965                 cmd->port = PORT_TP;
8966         } else {
8967                 cmd->supported |= SUPPORTED_FIBRE;
8968                 cmd->port = PORT_FIBRE;
8969         }
8970
8971         cmd->advertising = tp->link_config.advertising;
8972         if (netif_running(dev)) {
8973                 cmd->speed = tp->link_config.active_speed;
8974                 cmd->duplex = tp->link_config.active_duplex;
8975         }
8976         cmd->phy_address = PHY_ADDR;
8977         cmd->transceiver = 0;
8978         cmd->autoneg = tp->link_config.autoneg;
8979         cmd->maxtxpkt = 0;
8980         cmd->maxrxpkt = 0;
8981         return 0;
8982 }
8983
8984 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8985 {
8986         struct tg3 *tp = netdev_priv(dev);
8987
8988         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8989                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8990                         return -EAGAIN;
8991                 return phy_ethtool_sset(tp->mdio_bus.phy_map[PHY_ADDR], cmd);
8992         }
8993
8994         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8995                 /* These are the only valid advertisement bits allowed.  */
8996                 if (cmd->autoneg == AUTONEG_ENABLE &&
8997                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8998                                           ADVERTISED_1000baseT_Full |
8999                                           ADVERTISED_Autoneg |
9000                                           ADVERTISED_FIBRE)))
9001                         return -EINVAL;
9002                 /* Fiber can only do SPEED_1000.  */
9003                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9004                          (cmd->speed != SPEED_1000))
9005                         return -EINVAL;
9006         /* Copper cannot force SPEED_1000.  */
9007         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9008                    (cmd->speed == SPEED_1000))
9009                 return -EINVAL;
9010         else if ((cmd->speed == SPEED_1000) &&
9011                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
9012                 return -EINVAL;
9013
9014         tg3_full_lock(tp, 0);
9015
9016         tp->link_config.autoneg = cmd->autoneg;
9017         if (cmd->autoneg == AUTONEG_ENABLE) {
9018                 tp->link_config.advertising = (cmd->advertising |
9019                                               ADVERTISED_Autoneg);
9020                 tp->link_config.speed = SPEED_INVALID;
9021                 tp->link_config.duplex = DUPLEX_INVALID;
9022         } else {
9023                 tp->link_config.advertising = 0;
9024                 tp->link_config.speed = cmd->speed;
9025                 tp->link_config.duplex = cmd->duplex;
9026         }
9027
9028         tp->link_config.orig_speed = tp->link_config.speed;
9029         tp->link_config.orig_duplex = tp->link_config.duplex;
9030         tp->link_config.orig_autoneg = tp->link_config.autoneg;
9031
9032         if (netif_running(dev))
9033                 tg3_setup_phy(tp, 1);
9034
9035         tg3_full_unlock(tp);
9036
9037         return 0;
9038 }
9039
9040 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9041 {
9042         struct tg3 *tp = netdev_priv(dev);
9043
9044         strcpy(info->driver, DRV_MODULE_NAME);
9045         strcpy(info->version, DRV_MODULE_VERSION);
9046         strcpy(info->fw_version, tp->fw_ver);
9047         strcpy(info->bus_info, pci_name(tp->pdev));
9048 }
9049
9050 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9051 {
9052         struct tg3 *tp = netdev_priv(dev);
9053
9054         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
9055                 wol->supported = WAKE_MAGIC;
9056         else
9057                 wol->supported = 0;
9058         wol->wolopts = 0;
9059         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9060                 wol->wolopts = WAKE_MAGIC;
9061         memset(&wol->sopass, 0, sizeof(wol->sopass));
9062 }
9063
9064 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9065 {
9066         struct tg3 *tp = netdev_priv(dev);
9067
9068         if (wol->wolopts & ~WAKE_MAGIC)
9069                 return -EINVAL;
9070         if ((wol->wolopts & WAKE_MAGIC) &&
9071             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
9072                 return -EINVAL;
9073
9074         spin_lock_bh(&tp->lock);
9075         if (wol->wolopts & WAKE_MAGIC)
9076                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9077         else
9078                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9079         spin_unlock_bh(&tp->lock);
9080
9081         return 0;
9082 }
9083
9084 static u32 tg3_get_msglevel(struct net_device *dev)
9085 {
9086         struct tg3 *tp = netdev_priv(dev);
9087         return tp->msg_enable;
9088 }
9089
9090 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9091 {
9092         struct tg3 *tp = netdev_priv(dev);
9093         tp->msg_enable = value;
9094 }
9095
9096 static int tg3_set_tso(struct net_device *dev, u32 value)
9097 {
9098         struct tg3 *tp = netdev_priv(dev);
9099
9100         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9101                 if (value)
9102                         return -EINVAL;
9103                 return 0;
9104         }
9105         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9106             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9107                 if (value) {
9108                         dev->features |= NETIF_F_TSO6;
9109                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9110                             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9111                              GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9112                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9113                                 dev->features |= NETIF_F_TSO_ECN;
9114                 } else
9115                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9116         }
9117         return ethtool_op_set_tso(dev, value);
9118 }
9119
9120 static int tg3_nway_reset(struct net_device *dev)
9121 {
9122         struct tg3 *tp = netdev_priv(dev);
9123         int r;
9124
9125         if (!netif_running(dev))
9126                 return -EAGAIN;
9127
9128         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9129                 return -EINVAL;
9130
9131         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9132                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9133                         return -EAGAIN;
9134                 r = phy_start_aneg(tp->mdio_bus.phy_map[PHY_ADDR]);
9135         } else {
9136                 u32 bmcr;
9137
9138                 spin_lock_bh(&tp->lock);
9139                 r = -EINVAL;
9140                 tg3_readphy(tp, MII_BMCR, &bmcr);
9141                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9142                     ((bmcr & BMCR_ANENABLE) ||
9143                      (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9144                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9145                                                    BMCR_ANENABLE);
9146                         r = 0;
9147                 }
9148                 spin_unlock_bh(&tp->lock);
9149         }
9150
9151         return r;
9152 }
9153
9154 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9155 {
9156         struct tg3 *tp = netdev_priv(dev);
9157
9158         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9159         ering->rx_mini_max_pending = 0;
9160         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9161                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9162         else
9163                 ering->rx_jumbo_max_pending = 0;
9164
9165         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9166
9167         ering->rx_pending = tp->rx_pending;
9168         ering->rx_mini_pending = 0;
9169         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9170                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9171         else
9172                 ering->rx_jumbo_pending = 0;
9173
9174         ering->tx_pending = tp->tx_pending;
9175 }
9176
9177 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9178 {
9179         struct tg3 *tp = netdev_priv(dev);
9180         int irq_sync = 0, err = 0;
9181
9182         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9183             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9184             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9185             (ering->tx_pending <= MAX_SKB_FRAGS) ||
9186             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9187              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9188                 return -EINVAL;
9189
9190         if (netif_running(dev)) {
9191                 tg3_phy_stop(tp);
9192                 tg3_netif_stop(tp);
9193                 irq_sync = 1;
9194         }
9195
9196         tg3_full_lock(tp, irq_sync);
9197
9198         tp->rx_pending = ering->rx_pending;
9199
9200         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9201             tp->rx_pending > 63)
9202                 tp->rx_pending = 63;
9203         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9204         tp->tx_pending = ering->tx_pending;
9205
9206         if (netif_running(dev)) {
9207                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9208                 err = tg3_restart_hw(tp, 1);
9209                 if (!err)
9210                         tg3_netif_start(tp);
9211         }
9212
9213         tg3_full_unlock(tp);
9214
9215         if (irq_sync && !err)
9216                 tg3_phy_start(tp);
9217
9218         return err;
9219 }
9220
9221 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9222 {
9223         struct tg3 *tp = netdev_priv(dev);
9224
9225         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9226
9227         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9228                 epause->rx_pause = 1;
9229         else
9230                 epause->rx_pause = 0;
9231
9232         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9233                 epause->tx_pause = 1;
9234         else
9235                 epause->tx_pause = 0;
9236 }
9237
9238 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9239 {
9240         struct tg3 *tp = netdev_priv(dev);
9241         int err = 0;
9242
9243         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9244                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9245                         return -EAGAIN;
9246
9247                 if (epause->autoneg) {
9248                         u32 newadv;
9249                         struct phy_device *phydev;
9250
9251                         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
9252
9253                         if (epause->rx_pause) {
9254                                 if (epause->tx_pause)
9255                                         newadv = ADVERTISED_Pause;
9256                                 else
9257                                         newadv = ADVERTISED_Pause |
9258                                                  ADVERTISED_Asym_Pause;
9259                         } else if (epause->tx_pause) {
9260                                 newadv = ADVERTISED_Asym_Pause;
9261                         } else
9262                                 newadv = 0;
9263
9264                         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9265                                 u32 oldadv = phydev->advertising &
9266                                              (ADVERTISED_Pause |
9267                                               ADVERTISED_Asym_Pause);
9268                                 if (oldadv != newadv) {
9269                                         phydev->advertising &=
9270                                                 ~(ADVERTISED_Pause |
9271                                                   ADVERTISED_Asym_Pause);
9272                                         phydev->advertising |= newadv;
9273                                         err = phy_start_aneg(phydev);
9274                                 }
9275                         } else {
9276                                 tp->link_config.advertising &=
9277                                                 ~(ADVERTISED_Pause |
9278                                                   ADVERTISED_Asym_Pause);
9279                                 tp->link_config.advertising |= newadv;
9280                         }
9281                 } else {
9282                         if (epause->rx_pause)
9283                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9284                         else
9285                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9286
9287                         if (epause->tx_pause)
9288                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9289                         else
9290                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9291
9292                         if (netif_running(dev))
9293                                 tg3_setup_flow_control(tp, 0, 0);
9294                 }
9295         } else {
9296                 int irq_sync = 0;
9297
9298                 if (netif_running(dev)) {
9299                         tg3_netif_stop(tp);
9300                         irq_sync = 1;
9301                 }
9302
9303                 tg3_full_lock(tp, irq_sync);
9304
9305                 if (epause->autoneg)
9306                         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9307                 else
9308                         tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9309                 if (epause->rx_pause)
9310                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9311                 else
9312                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9313                 if (epause->tx_pause)
9314                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9315                 else
9316                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9317
9318                 if (netif_running(dev)) {
9319                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9320                         err = tg3_restart_hw(tp, 1);
9321                         if (!err)
9322                                 tg3_netif_start(tp);
9323                 }
9324
9325                 tg3_full_unlock(tp);
9326         }
9327
9328         return err;
9329 }
9330
9331 static u32 tg3_get_rx_csum(struct net_device *dev)
9332 {
9333         struct tg3 *tp = netdev_priv(dev);
9334         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9335 }
9336
9337 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9338 {
9339         struct tg3 *tp = netdev_priv(dev);
9340
9341         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9342                 if (data != 0)
9343                         return -EINVAL;
9344                 return 0;
9345         }
9346
9347         spin_lock_bh(&tp->lock);
9348         if (data)
9349                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9350         else
9351                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9352         spin_unlock_bh(&tp->lock);
9353
9354         return 0;
9355 }
9356
9357 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9358 {
9359         struct tg3 *tp = netdev_priv(dev);
9360
9361         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9362                 if (data != 0)
9363                         return -EINVAL;
9364                 return 0;
9365         }
9366
9367         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9368             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9369             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9370             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9371             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9372                 ethtool_op_set_tx_ipv6_csum(dev, data);
9373         else
9374                 ethtool_op_set_tx_csum(dev, data);
9375
9376         return 0;
9377 }
9378
9379 static int tg3_get_sset_count (struct net_device *dev, int sset)
9380 {
9381         switch (sset) {
9382         case ETH_SS_TEST:
9383                 return TG3_NUM_TEST;
9384         case ETH_SS_STATS:
9385                 return TG3_NUM_STATS;
9386         default:
9387                 return -EOPNOTSUPP;
9388         }
9389 }
9390
9391 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9392 {
9393         switch (stringset) {
9394         case ETH_SS_STATS:
9395                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9396                 break;
9397         case ETH_SS_TEST:
9398                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9399                 break;
9400         default:
9401                 WARN_ON(1);     /* we need a WARN() */
9402                 break;
9403         }
9404 }
9405
9406 static int tg3_phys_id(struct net_device *dev, u32 data)
9407 {
9408         struct tg3 *tp = netdev_priv(dev);
9409         int i;
9410
9411         if (!netif_running(tp->dev))
9412                 return -EAGAIN;
9413
9414         if (data == 0)
9415                 data = UINT_MAX / 2;
9416
9417         for (i = 0; i < (data * 2); i++) {
9418                 if ((i % 2) == 0)
9419                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9420                                            LED_CTRL_1000MBPS_ON |
9421                                            LED_CTRL_100MBPS_ON |
9422                                            LED_CTRL_10MBPS_ON |
9423                                            LED_CTRL_TRAFFIC_OVERRIDE |
9424                                            LED_CTRL_TRAFFIC_BLINK |
9425                                            LED_CTRL_TRAFFIC_LED);
9426
9427                 else
9428                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9429                                            LED_CTRL_TRAFFIC_OVERRIDE);
9430
9431                 if (msleep_interruptible(500))
9432                         break;
9433         }
9434         tw32(MAC_LED_CTRL, tp->led_ctrl);
9435         return 0;
9436 }
9437
9438 static void tg3_get_ethtool_stats (struct net_device *dev,
9439                                    struct ethtool_stats *estats, u64 *tmp_stats)
9440 {
9441         struct tg3 *tp = netdev_priv(dev);
9442         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9443 }
9444
9445 #define NVRAM_TEST_SIZE 0x100
9446 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9447 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9448 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9449 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9450 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9451
9452 static int tg3_test_nvram(struct tg3 *tp)
9453 {
9454         u32 csum, magic;
9455         __le32 *buf;
9456         int i, j, k, err = 0, size;
9457
9458         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9459                 return -EIO;
9460
9461         if (magic == TG3_EEPROM_MAGIC)
9462                 size = NVRAM_TEST_SIZE;
9463         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9464                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9465                     TG3_EEPROM_SB_FORMAT_1) {
9466                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9467                         case TG3_EEPROM_SB_REVISION_0:
9468                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9469                                 break;
9470                         case TG3_EEPROM_SB_REVISION_2:
9471                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9472                                 break;
9473                         case TG3_EEPROM_SB_REVISION_3:
9474                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9475                                 break;
9476                         default:
9477                                 return 0;
9478                         }
9479                 } else
9480                         return 0;
9481         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9482                 size = NVRAM_SELFBOOT_HW_SIZE;
9483         else
9484                 return -EIO;
9485
9486         buf = kmalloc(size, GFP_KERNEL);
9487         if (buf == NULL)
9488                 return -ENOMEM;
9489
9490         err = -EIO;
9491         for (i = 0, j = 0; i < size; i += 4, j++) {
9492                 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9493                         break;
9494         }
9495         if (i < size)
9496                 goto out;
9497
9498         /* Selfboot format */
9499         magic = swab32(le32_to_cpu(buf[0]));
9500         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9501             TG3_EEPROM_MAGIC_FW) {
9502                 u8 *buf8 = (u8 *) buf, csum8 = 0;
9503
9504                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9505                     TG3_EEPROM_SB_REVISION_2) {
9506                         /* For rev 2, the csum doesn't include the MBA. */
9507                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9508                                 csum8 += buf8[i];
9509                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9510                                 csum8 += buf8[i];
9511                 } else {
9512                         for (i = 0; i < size; i++)
9513                                 csum8 += buf8[i];
9514                 }
9515
9516                 if (csum8 == 0) {
9517                         err = 0;
9518                         goto out;
9519                 }
9520
9521                 err = -EIO;
9522                 goto out;
9523         }
9524
9525         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9526             TG3_EEPROM_MAGIC_HW) {
9527                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9528                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9529                 u8 *buf8 = (u8 *) buf;
9530
9531                 /* Separate the parity bits and the data bytes.  */
9532                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9533                         if ((i == 0) || (i == 8)) {
9534                                 int l;
9535                                 u8 msk;
9536
9537                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9538                                         parity[k++] = buf8[i] & msk;
9539                                 i++;
9540                         }
9541                         else if (i == 16) {
9542                                 int l;
9543                                 u8 msk;
9544
9545                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9546                                         parity[k++] = buf8[i] & msk;
9547                                 i++;
9548
9549                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9550                                         parity[k++] = buf8[i] & msk;
9551                                 i++;
9552                         }
9553                         data[j++] = buf8[i];
9554                 }
9555
9556                 err = -EIO;
9557                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9558                         u8 hw8 = hweight8(data[i]);
9559
9560                         if ((hw8 & 0x1) && parity[i])
9561                                 goto out;
9562                         else if (!(hw8 & 0x1) && !parity[i])
9563                                 goto out;
9564                 }
9565                 err = 0;
9566                 goto out;
9567         }
9568
9569         /* Bootstrap checksum at offset 0x10 */
9570         csum = calc_crc((unsigned char *) buf, 0x10);
9571         if(csum != le32_to_cpu(buf[0x10/4]))
9572                 goto out;
9573
9574         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9575         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9576         if (csum != le32_to_cpu(buf[0xfc/4]))
9577                  goto out;
9578
9579         err = 0;
9580
9581 out:
9582         kfree(buf);
9583         return err;
9584 }
9585
9586 #define TG3_SERDES_TIMEOUT_SEC  2
9587 #define TG3_COPPER_TIMEOUT_SEC  6
9588
9589 static int tg3_test_link(struct tg3 *tp)
9590 {
9591         int i, max;
9592
9593         if (!netif_running(tp->dev))
9594                 return -ENODEV;
9595
9596         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9597                 max = TG3_SERDES_TIMEOUT_SEC;
9598         else
9599                 max = TG3_COPPER_TIMEOUT_SEC;
9600
9601         for (i = 0; i < max; i++) {
9602                 if (netif_carrier_ok(tp->dev))
9603                         return 0;
9604
9605                 if (msleep_interruptible(1000))
9606                         break;
9607         }
9608
9609         return -EIO;
9610 }
9611
9612 /* Only test the commonly used registers */
9613 static int tg3_test_registers(struct tg3 *tp)
9614 {
9615         int i, is_5705, is_5750;
9616         u32 offset, read_mask, write_mask, val, save_val, read_val;
9617         static struct {
9618                 u16 offset;
9619                 u16 flags;
9620 #define TG3_FL_5705     0x1
9621 #define TG3_FL_NOT_5705 0x2
9622 #define TG3_FL_NOT_5788 0x4
9623 #define TG3_FL_NOT_5750 0x8
9624                 u32 read_mask;
9625                 u32 write_mask;
9626         } reg_tbl[] = {
9627                 /* MAC Control Registers */
9628                 { MAC_MODE, TG3_FL_NOT_5705,
9629                         0x00000000, 0x00ef6f8c },
9630                 { MAC_MODE, TG3_FL_5705,
9631                         0x00000000, 0x01ef6b8c },
9632                 { MAC_STATUS, TG3_FL_NOT_5705,
9633                         0x03800107, 0x00000000 },
9634                 { MAC_STATUS, TG3_FL_5705,
9635                         0x03800100, 0x00000000 },
9636                 { MAC_ADDR_0_HIGH, 0x0000,
9637                         0x00000000, 0x0000ffff },
9638                 { MAC_ADDR_0_LOW, 0x0000,
9639                         0x00000000, 0xffffffff },
9640                 { MAC_RX_MTU_SIZE, 0x0000,
9641                         0x00000000, 0x0000ffff },
9642                 { MAC_TX_MODE, 0x0000,
9643                         0x00000000, 0x00000070 },
9644                 { MAC_TX_LENGTHS, 0x0000,
9645                         0x00000000, 0x00003fff },
9646                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9647                         0x00000000, 0x000007fc },
9648                 { MAC_RX_MODE, TG3_FL_5705,
9649                         0x00000000, 0x000007dc },
9650                 { MAC_HASH_REG_0, 0x0000,
9651                         0x00000000, 0xffffffff },
9652                 { MAC_HASH_REG_1, 0x0000,
9653                         0x00000000, 0xffffffff },
9654                 { MAC_HASH_REG_2, 0x0000,
9655                         0x00000000, 0xffffffff },
9656                 { MAC_HASH_REG_3, 0x0000,
9657                         0x00000000, 0xffffffff },
9658
9659                 /* Receive Data and Receive BD Initiator Control Registers. */
9660                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9661                         0x00000000, 0xffffffff },
9662                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9663                         0x00000000, 0xffffffff },
9664                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9665                         0x00000000, 0x00000003 },
9666                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9667                         0x00000000, 0xffffffff },
9668                 { RCVDBDI_STD_BD+0, 0x0000,
9669                         0x00000000, 0xffffffff },
9670                 { RCVDBDI_STD_BD+4, 0x0000,
9671                         0x00000000, 0xffffffff },
9672                 { RCVDBDI_STD_BD+8, 0x0000,
9673                         0x00000000, 0xffff0002 },
9674                 { RCVDBDI_STD_BD+0xc, 0x0000,
9675                         0x00000000, 0xffffffff },
9676
9677                 /* Receive BD Initiator Control Registers. */
9678                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9679                         0x00000000, 0xffffffff },
9680                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9681                         0x00000000, 0x000003ff },
9682                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9683                         0x00000000, 0xffffffff },
9684
9685                 /* Host Coalescing Control Registers. */
9686                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9687                         0x00000000, 0x00000004 },
9688                 { HOSTCC_MODE, TG3_FL_5705,
9689                         0x00000000, 0x000000f6 },
9690                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9691                         0x00000000, 0xffffffff },
9692                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9693                         0x00000000, 0x000003ff },
9694                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9695                         0x00000000, 0xffffffff },
9696                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9697                         0x00000000, 0x000003ff },
9698                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9699                         0x00000000, 0xffffffff },
9700                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9701                         0x00000000, 0x000000ff },
9702                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9703                         0x00000000, 0xffffffff },
9704                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9705                         0x00000000, 0x000000ff },
9706                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9707                         0x00000000, 0xffffffff },
9708                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9709                         0x00000000, 0xffffffff },
9710                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9711                         0x00000000, 0xffffffff },
9712                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9713                         0x00000000, 0x000000ff },
9714                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9715                         0x00000000, 0xffffffff },
9716                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9717                         0x00000000, 0x000000ff },
9718                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9719                         0x00000000, 0xffffffff },
9720                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9721                         0x00000000, 0xffffffff },
9722                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9723                         0x00000000, 0xffffffff },
9724                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9725                         0x00000000, 0xffffffff },
9726                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9727                         0x00000000, 0xffffffff },
9728                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9729                         0xffffffff, 0x00000000 },
9730                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9731                         0xffffffff, 0x00000000 },
9732
9733                 /* Buffer Manager Control Registers. */
9734                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9735                         0x00000000, 0x007fff80 },
9736                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9737                         0x00000000, 0x007fffff },
9738                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9739                         0x00000000, 0x0000003f },
9740                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9741                         0x00000000, 0x000001ff },
9742                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9743                         0x00000000, 0x000001ff },
9744                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9745                         0xffffffff, 0x00000000 },
9746                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9747                         0xffffffff, 0x00000000 },
9748
9749                 /* Mailbox Registers */
9750                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9751                         0x00000000, 0x000001ff },
9752                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9753                         0x00000000, 0x000001ff },
9754                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9755                         0x00000000, 0x000007ff },
9756                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9757                         0x00000000, 0x000001ff },
9758
9759                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9760         };
9761
9762         is_5705 = is_5750 = 0;
9763         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9764                 is_5705 = 1;
9765                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9766                         is_5750 = 1;
9767         }
9768
9769         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9770                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9771                         continue;
9772
9773                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9774                         continue;
9775
9776                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9777                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9778                         continue;
9779
9780                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9781                         continue;
9782
9783                 offset = (u32) reg_tbl[i].offset;
9784                 read_mask = reg_tbl[i].read_mask;
9785                 write_mask = reg_tbl[i].write_mask;
9786
9787                 /* Save the original register content */
9788                 save_val = tr32(offset);
9789
9790                 /* Determine the read-only value. */
9791                 read_val = save_val & read_mask;
9792
9793                 /* Write zero to the register, then make sure the read-only bits
9794                  * are not changed and the read/write bits are all zeros.
9795                  */
9796                 tw32(offset, 0);
9797
9798                 val = tr32(offset);
9799
9800                 /* Test the read-only and read/write bits. */
9801                 if (((val & read_mask) != read_val) || (val & write_mask))
9802                         goto out;
9803
9804                 /* Write ones to all the bits defined by RdMask and WrMask, then
9805                  * make sure the read-only bits are not changed and the
9806                  * read/write bits are all ones.
9807                  */
9808                 tw32(offset, read_mask | write_mask);
9809
9810                 val = tr32(offset);
9811
9812                 /* Test the read-only bits. */
9813                 if ((val & read_mask) != read_val)
9814                         goto out;
9815
9816                 /* Test the read/write bits. */
9817                 if ((val & write_mask) != write_mask)
9818                         goto out;
9819
9820                 tw32(offset, save_val);
9821         }
9822
9823         return 0;
9824
9825 out:
9826         if (netif_msg_hw(tp))
9827                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9828                        offset);
9829         tw32(offset, save_val);
9830         return -EIO;
9831 }
9832
9833 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9834 {
9835         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9836         int i;
9837         u32 j;
9838
9839         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9840                 for (j = 0; j < len; j += 4) {
9841                         u32 val;
9842
9843                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9844                         tg3_read_mem(tp, offset + j, &val);
9845                         if (val != test_pattern[i])
9846                                 return -EIO;
9847                 }
9848         }
9849         return 0;
9850 }
9851
9852 static int tg3_test_memory(struct tg3 *tp)
9853 {
9854         static struct mem_entry {
9855                 u32 offset;
9856                 u32 len;
9857         } mem_tbl_570x[] = {
9858                 { 0x00000000, 0x00b50},
9859                 { 0x00002000, 0x1c000},
9860                 { 0xffffffff, 0x00000}
9861         }, mem_tbl_5705[] = {
9862                 { 0x00000100, 0x0000c},
9863                 { 0x00000200, 0x00008},
9864                 { 0x00004000, 0x00800},
9865                 { 0x00006000, 0x01000},
9866                 { 0x00008000, 0x02000},
9867                 { 0x00010000, 0x0e000},
9868                 { 0xffffffff, 0x00000}
9869         }, mem_tbl_5755[] = {
9870                 { 0x00000200, 0x00008},
9871                 { 0x00004000, 0x00800},
9872                 { 0x00006000, 0x00800},
9873                 { 0x00008000, 0x02000},
9874                 { 0x00010000, 0x0c000},
9875                 { 0xffffffff, 0x00000}
9876         }, mem_tbl_5906[] = {
9877                 { 0x00000200, 0x00008},
9878                 { 0x00004000, 0x00400},
9879                 { 0x00006000, 0x00400},
9880                 { 0x00008000, 0x01000},
9881                 { 0x00010000, 0x01000},
9882                 { 0xffffffff, 0x00000}
9883         };
9884         struct mem_entry *mem_tbl;
9885         int err = 0;
9886         int i;
9887
9888         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9889                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9890                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9891                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9892                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9893                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9894                         mem_tbl = mem_tbl_5755;
9895                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9896                         mem_tbl = mem_tbl_5906;
9897                 else
9898                         mem_tbl = mem_tbl_5705;
9899         } else
9900                 mem_tbl = mem_tbl_570x;
9901
9902         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9903                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9904                     mem_tbl[i].len)) != 0)
9905                         break;
9906         }
9907
9908         return err;
9909 }
9910
9911 #define TG3_MAC_LOOPBACK        0
9912 #define TG3_PHY_LOOPBACK        1
9913
9914 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9915 {
9916         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9917         u32 desc_idx;
9918         struct sk_buff *skb, *rx_skb;
9919         u8 *tx_data;
9920         dma_addr_t map;
9921         int num_pkts, tx_len, rx_len, i, err;
9922         struct tg3_rx_buffer_desc *desc;
9923
9924         if (loopback_mode == TG3_MAC_LOOPBACK) {
9925                 /* HW errata - mac loopback fails in some cases on 5780.
9926                  * Normal traffic and PHY loopback are not affected by
9927                  * errata.
9928                  */
9929                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9930                         return 0;
9931
9932                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9933                            MAC_MODE_PORT_INT_LPBACK;
9934                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9935                         mac_mode |= MAC_MODE_LINK_POLARITY;
9936                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9937                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9938                 else
9939                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9940                 tw32(MAC_MODE, mac_mode);
9941         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9942                 u32 val;
9943
9944                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9945                         u32 phytest;
9946
9947                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9948                                 u32 phy;
9949
9950                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9951                                              phytest | MII_TG3_EPHY_SHADOW_EN);
9952                                 if (!tg3_readphy(tp, 0x1b, &phy))
9953                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
9954                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9955                         }
9956                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9957                 } else
9958                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9959
9960                 tg3_phy_toggle_automdix(tp, 0);
9961
9962                 tg3_writephy(tp, MII_BMCR, val);
9963                 udelay(40);
9964
9965                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9966                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9967                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9968                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9969                 } else
9970                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9971
9972                 /* reset to prevent losing 1st rx packet intermittently */
9973                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9974                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9975                         udelay(10);
9976                         tw32_f(MAC_RX_MODE, tp->rx_mode);
9977                 }
9978                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9979                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9980                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9981                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9982                                 mac_mode |= MAC_MODE_LINK_POLARITY;
9983                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
9984                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9985                 }
9986                 tw32(MAC_MODE, mac_mode);
9987         }
9988         else
9989                 return -EINVAL;
9990
9991         err = -EIO;
9992
9993         tx_len = 1514;
9994         skb = netdev_alloc_skb(tp->dev, tx_len);
9995         if (!skb)
9996                 return -ENOMEM;
9997
9998         tx_data = skb_put(skb, tx_len);
9999         memcpy(tx_data, tp->dev->dev_addr, 6);
10000         memset(tx_data + 6, 0x0, 8);
10001
10002         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10003
10004         for (i = 14; i < tx_len; i++)
10005                 tx_data[i] = (u8) (i & 0xff);
10006
10007         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10008
10009         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10010              HOSTCC_MODE_NOW);
10011
10012         udelay(10);
10013
10014         rx_start_idx = tp->hw_status->idx[0].rx_producer;
10015
10016         num_pkts = 0;
10017
10018         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10019
10020         tp->tx_prod++;
10021         num_pkts++;
10022
10023         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10024                      tp->tx_prod);
10025         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10026
10027         udelay(10);
10028
10029         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
10030         for (i = 0; i < 25; i++) {
10031                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10032                        HOSTCC_MODE_NOW);
10033
10034                 udelay(10);
10035
10036                 tx_idx = tp->hw_status->idx[0].tx_consumer;
10037                 rx_idx = tp->hw_status->idx[0].rx_producer;
10038                 if ((tx_idx == tp->tx_prod) &&
10039                     (rx_idx == (rx_start_idx + num_pkts)))
10040                         break;
10041         }
10042
10043         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10044         dev_kfree_skb(skb);
10045
10046         if (tx_idx != tp->tx_prod)
10047                 goto out;
10048
10049         if (rx_idx != rx_start_idx + num_pkts)
10050                 goto out;
10051
10052         desc = &tp->rx_rcb[rx_start_idx];
10053         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10054         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10055         if (opaque_key != RXD_OPAQUE_RING_STD)
10056                 goto out;
10057
10058         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10059             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10060                 goto out;
10061
10062         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10063         if (rx_len != tx_len)
10064                 goto out;
10065
10066         rx_skb = tp->rx_std_buffers[desc_idx].skb;
10067
10068         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10069         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10070
10071         for (i = 14; i < tx_len; i++) {
10072                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10073                         goto out;
10074         }
10075         err = 0;
10076
10077         /* tg3_free_rings will unmap and free the rx_skb */
10078 out:
10079         return err;
10080 }
10081
10082 #define TG3_MAC_LOOPBACK_FAILED         1
10083 #define TG3_PHY_LOOPBACK_FAILED         2
10084 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10085                                          TG3_PHY_LOOPBACK_FAILED)
10086
10087 static int tg3_test_loopback(struct tg3 *tp)
10088 {
10089         int err = 0;
10090         u32 cpmuctrl = 0;
10091
10092         if (!netif_running(tp->dev))
10093                 return TG3_LOOPBACK_FAILED;
10094
10095         err = tg3_reset_hw(tp, 1);
10096         if (err)
10097                 return TG3_LOOPBACK_FAILED;
10098
10099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10100             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10101             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10102                 int i;
10103                 u32 status;
10104
10105                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10106
10107                 /* Wait for up to 40 microseconds to acquire lock. */
10108                 for (i = 0; i < 4; i++) {
10109                         status = tr32(TG3_CPMU_MUTEX_GNT);
10110                         if (status == CPMU_MUTEX_GNT_DRIVER)
10111                                 break;
10112                         udelay(10);
10113                 }
10114
10115                 if (status != CPMU_MUTEX_GNT_DRIVER)
10116                         return TG3_LOOPBACK_FAILED;
10117
10118                 /* Turn off link-based power management. */
10119                 cpmuctrl = tr32(TG3_CPMU_CTRL);
10120                 tw32(TG3_CPMU_CTRL,
10121                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10122                                   CPMU_CTRL_LINK_AWARE_MODE));
10123         }
10124
10125         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10126                 err |= TG3_MAC_LOOPBACK_FAILED;
10127
10128         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10129             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10130             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10131                 tw32(TG3_CPMU_CTRL, cpmuctrl);
10132
10133                 /* Release the mutex */
10134                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10135         }
10136
10137         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10138             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10139                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10140                         err |= TG3_PHY_LOOPBACK_FAILED;
10141         }
10142
10143         return err;
10144 }
10145
10146 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10147                           u64 *data)
10148 {
10149         struct tg3 *tp = netdev_priv(dev);
10150
10151         if (tp->link_config.phy_is_low_power)
10152                 tg3_set_power_state(tp, PCI_D0);
10153
10154         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10155
10156         if (tg3_test_nvram(tp) != 0) {
10157                 etest->flags |= ETH_TEST_FL_FAILED;
10158                 data[0] = 1;
10159         }
10160         if (tg3_test_link(tp) != 0) {
10161                 etest->flags |= ETH_TEST_FL_FAILED;
10162                 data[1] = 1;
10163         }
10164         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10165                 int err, err2 = 0, irq_sync = 0;
10166
10167                 if (netif_running(dev)) {
10168                         tg3_phy_stop(tp);
10169                         tg3_netif_stop(tp);
10170                         irq_sync = 1;
10171                 }
10172
10173                 tg3_full_lock(tp, irq_sync);
10174
10175                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10176                 err = tg3_nvram_lock(tp);
10177                 tg3_halt_cpu(tp, RX_CPU_BASE);
10178                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10179                         tg3_halt_cpu(tp, TX_CPU_BASE);
10180                 if (!err)
10181                         tg3_nvram_unlock(tp);
10182
10183                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10184                         tg3_phy_reset(tp);
10185
10186                 if (tg3_test_registers(tp) != 0) {
10187                         etest->flags |= ETH_TEST_FL_FAILED;
10188                         data[2] = 1;
10189                 }
10190                 if (tg3_test_memory(tp) != 0) {
10191                         etest->flags |= ETH_TEST_FL_FAILED;
10192                         data[3] = 1;
10193                 }
10194                 if ((data[4] = tg3_test_loopback(tp)) != 0)
10195                         etest->flags |= ETH_TEST_FL_FAILED;
10196
10197                 tg3_full_unlock(tp);
10198
10199                 if (tg3_test_interrupt(tp) != 0) {
10200                         etest->flags |= ETH_TEST_FL_FAILED;
10201                         data[5] = 1;
10202                 }
10203
10204                 tg3_full_lock(tp, 0);
10205
10206                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10207                 if (netif_running(dev)) {
10208                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10209                         err2 = tg3_restart_hw(tp, 1);
10210                         if (!err2)
10211                                 tg3_netif_start(tp);
10212                 }
10213
10214                 tg3_full_unlock(tp);
10215
10216                 if (irq_sync && !err2)
10217                         tg3_phy_start(tp);
10218         }
10219         if (tp->link_config.phy_is_low_power)
10220                 tg3_set_power_state(tp, PCI_D3hot);
10221
10222 }
10223
10224 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10225 {
10226         struct mii_ioctl_data *data = if_mii(ifr);
10227         struct tg3 *tp = netdev_priv(dev);
10228         int err;
10229
10230         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10231                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10232                         return -EAGAIN;
10233                 return phy_mii_ioctl(tp->mdio_bus.phy_map[PHY_ADDR], data, cmd);
10234         }
10235
10236         switch(cmd) {
10237         case SIOCGMIIPHY:
10238                 data->phy_id = PHY_ADDR;
10239
10240                 /* fallthru */
10241         case SIOCGMIIREG: {
10242                 u32 mii_regval;
10243
10244                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10245                         break;                  /* We have no PHY */
10246
10247                 if (tp->link_config.phy_is_low_power)
10248                         return -EAGAIN;
10249
10250                 spin_lock_bh(&tp->lock);
10251                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10252                 spin_unlock_bh(&tp->lock);
10253
10254                 data->val_out = mii_regval;
10255
10256                 return err;
10257         }
10258
10259         case SIOCSMIIREG:
10260                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10261                         break;                  /* We have no PHY */
10262
10263                 if (!capable(CAP_NET_ADMIN))
10264                         return -EPERM;
10265
10266                 if (tp->link_config.phy_is_low_power)
10267                         return -EAGAIN;
10268
10269                 spin_lock_bh(&tp->lock);
10270                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10271                 spin_unlock_bh(&tp->lock);
10272
10273                 return err;
10274
10275         default:
10276                 /* do nothing */
10277                 break;
10278         }
10279         return -EOPNOTSUPP;
10280 }
10281
10282 #if TG3_VLAN_TAG_USED
10283 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10284 {
10285         struct tg3 *tp = netdev_priv(dev);
10286
10287         if (netif_running(dev))
10288                 tg3_netif_stop(tp);
10289
10290         tg3_full_lock(tp, 0);
10291
10292         tp->vlgrp = grp;
10293
10294         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10295         __tg3_set_rx_mode(dev);
10296
10297         if (netif_running(dev))
10298                 tg3_netif_start(tp);
10299
10300         tg3_full_unlock(tp);
10301 }
10302 #endif
10303
10304 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10305 {
10306         struct tg3 *tp = netdev_priv(dev);
10307
10308         memcpy(ec, &tp->coal, sizeof(*ec));
10309         return 0;
10310 }
10311
10312 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10313 {
10314         struct tg3 *tp = netdev_priv(dev);
10315         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10316         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10317
10318         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10319                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10320                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10321                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10322                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10323         }
10324
10325         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10326             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10327             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10328             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10329             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10330             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10331             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10332             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10333             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10334             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10335                 return -EINVAL;
10336
10337         /* No rx interrupts will be generated if both are zero */
10338         if ((ec->rx_coalesce_usecs == 0) &&
10339             (ec->rx_max_coalesced_frames == 0))
10340                 return -EINVAL;
10341
10342         /* No tx interrupts will be generated if both are zero */
10343         if ((ec->tx_coalesce_usecs == 0) &&
10344             (ec->tx_max_coalesced_frames == 0))
10345                 return -EINVAL;
10346
10347         /* Only copy relevant parameters, ignore all others. */
10348         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10349         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10350         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10351         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10352         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10353         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10354         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10355         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10356         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10357
10358         if (netif_running(dev)) {
10359                 tg3_full_lock(tp, 0);
10360                 __tg3_set_coalesce(tp, &tp->coal);
10361                 tg3_full_unlock(tp);
10362         }
10363         return 0;
10364 }
10365
10366 static const struct ethtool_ops tg3_ethtool_ops = {
10367         .get_settings           = tg3_get_settings,
10368         .set_settings           = tg3_set_settings,
10369         .get_drvinfo            = tg3_get_drvinfo,
10370         .get_regs_len           = tg3_get_regs_len,
10371         .get_regs               = tg3_get_regs,
10372         .get_wol                = tg3_get_wol,
10373         .set_wol                = tg3_set_wol,
10374         .get_msglevel           = tg3_get_msglevel,
10375         .set_msglevel           = tg3_set_msglevel,
10376         .nway_reset             = tg3_nway_reset,
10377         .get_link               = ethtool_op_get_link,
10378         .get_eeprom_len         = tg3_get_eeprom_len,
10379         .get_eeprom             = tg3_get_eeprom,
10380         .set_eeprom             = tg3_set_eeprom,
10381         .get_ringparam          = tg3_get_ringparam,
10382         .set_ringparam          = tg3_set_ringparam,
10383         .get_pauseparam         = tg3_get_pauseparam,
10384         .set_pauseparam         = tg3_set_pauseparam,
10385         .get_rx_csum            = tg3_get_rx_csum,
10386         .set_rx_csum            = tg3_set_rx_csum,
10387         .set_tx_csum            = tg3_set_tx_csum,
10388         .set_sg                 = ethtool_op_set_sg,
10389         .set_tso                = tg3_set_tso,
10390         .self_test              = tg3_self_test,
10391         .get_strings            = tg3_get_strings,
10392         .phys_id                = tg3_phys_id,
10393         .get_ethtool_stats      = tg3_get_ethtool_stats,
10394         .get_coalesce           = tg3_get_coalesce,
10395         .set_coalesce           = tg3_set_coalesce,
10396         .get_sset_count         = tg3_get_sset_count,
10397 };
10398
10399 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10400 {
10401         u32 cursize, val, magic;
10402
10403         tp->nvram_size = EEPROM_CHIP_SIZE;
10404
10405         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10406                 return;
10407
10408         if ((magic != TG3_EEPROM_MAGIC) &&
10409             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10410             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10411                 return;
10412
10413         /*
10414          * Size the chip by reading offsets at increasing powers of two.
10415          * When we encounter our validation signature, we know the addressing
10416          * has wrapped around, and thus have our chip size.
10417          */
10418         cursize = 0x10;
10419
10420         while (cursize < tp->nvram_size) {
10421                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10422                         return;
10423
10424                 if (val == magic)
10425                         break;
10426
10427                 cursize <<= 1;
10428         }
10429
10430         tp->nvram_size = cursize;
10431 }
10432
10433 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10434 {
10435         u32 val;
10436
10437         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10438                 return;
10439
10440         /* Selfboot format */
10441         if (val != TG3_EEPROM_MAGIC) {
10442                 tg3_get_eeprom_size(tp);
10443                 return;
10444         }
10445
10446         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10447                 if (val != 0) {
10448                         tp->nvram_size = (val >> 16) * 1024;
10449                         return;
10450                 }
10451         }
10452         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10453 }
10454
10455 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10456 {
10457         u32 nvcfg1;
10458
10459         nvcfg1 = tr32(NVRAM_CFG1);
10460         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10461                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10462         }
10463         else {
10464                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10465                 tw32(NVRAM_CFG1, nvcfg1);
10466         }
10467
10468         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10469             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10470                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10471                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10472                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10473                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10474                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10475                                 break;
10476                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10477                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10478                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10479                                 break;
10480                         case FLASH_VENDOR_ATMEL_EEPROM:
10481                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10482                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10483                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10484                                 break;
10485                         case FLASH_VENDOR_ST:
10486                                 tp->nvram_jedecnum = JEDEC_ST;
10487                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10488                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10489                                 break;
10490                         case FLASH_VENDOR_SAIFUN:
10491                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10492                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10493                                 break;
10494                         case FLASH_VENDOR_SST_SMALL:
10495                         case FLASH_VENDOR_SST_LARGE:
10496                                 tp->nvram_jedecnum = JEDEC_SST;
10497                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10498                                 break;
10499                 }
10500         }
10501         else {
10502                 tp->nvram_jedecnum = JEDEC_ATMEL;
10503                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10504                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10505         }
10506 }
10507
10508 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10509 {
10510         u32 nvcfg1;
10511
10512         nvcfg1 = tr32(NVRAM_CFG1);
10513
10514         /* NVRAM protection for TPM */
10515         if (nvcfg1 & (1 << 27))
10516                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10517
10518         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10519                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10520                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10521                         tp->nvram_jedecnum = JEDEC_ATMEL;
10522                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10523                         break;
10524                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10525                         tp->nvram_jedecnum = JEDEC_ATMEL;
10526                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10527                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10528                         break;
10529                 case FLASH_5752VENDOR_ST_M45PE10:
10530                 case FLASH_5752VENDOR_ST_M45PE20:
10531                 case FLASH_5752VENDOR_ST_M45PE40:
10532                         tp->nvram_jedecnum = JEDEC_ST;
10533                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10534                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10535                         break;
10536         }
10537
10538         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10539                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10540                         case FLASH_5752PAGE_SIZE_256:
10541                                 tp->nvram_pagesize = 256;
10542                                 break;
10543                         case FLASH_5752PAGE_SIZE_512:
10544                                 tp->nvram_pagesize = 512;
10545                                 break;
10546                         case FLASH_5752PAGE_SIZE_1K:
10547                                 tp->nvram_pagesize = 1024;
10548                                 break;
10549                         case FLASH_5752PAGE_SIZE_2K:
10550                                 tp->nvram_pagesize = 2048;
10551                                 break;
10552                         case FLASH_5752PAGE_SIZE_4K:
10553                                 tp->nvram_pagesize = 4096;
10554                                 break;
10555                         case FLASH_5752PAGE_SIZE_264:
10556                                 tp->nvram_pagesize = 264;
10557                                 break;
10558                 }
10559         }
10560         else {
10561                 /* For eeprom, set pagesize to maximum eeprom size */
10562                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10563
10564                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10565                 tw32(NVRAM_CFG1, nvcfg1);
10566         }
10567 }
10568
10569 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10570 {
10571         u32 nvcfg1, protect = 0;
10572
10573         nvcfg1 = tr32(NVRAM_CFG1);
10574
10575         /* NVRAM protection for TPM */
10576         if (nvcfg1 & (1 << 27)) {
10577                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10578                 protect = 1;
10579         }
10580
10581         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10582         switch (nvcfg1) {
10583                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10584                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10585                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10586                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10587                         tp->nvram_jedecnum = JEDEC_ATMEL;
10588                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10589                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10590                         tp->nvram_pagesize = 264;
10591                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10592                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10593                                 tp->nvram_size = (protect ? 0x3e200 :
10594                                                   TG3_NVRAM_SIZE_512KB);
10595                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10596                                 tp->nvram_size = (protect ? 0x1f200 :
10597                                                   TG3_NVRAM_SIZE_256KB);
10598                         else
10599                                 tp->nvram_size = (protect ? 0x1f200 :
10600                                                   TG3_NVRAM_SIZE_128KB);
10601                         break;
10602                 case FLASH_5752VENDOR_ST_M45PE10:
10603                 case FLASH_5752VENDOR_ST_M45PE20:
10604                 case FLASH_5752VENDOR_ST_M45PE40:
10605                         tp->nvram_jedecnum = JEDEC_ST;
10606                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10607                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10608                         tp->nvram_pagesize = 256;
10609                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10610                                 tp->nvram_size = (protect ?
10611                                                   TG3_NVRAM_SIZE_64KB :
10612                                                   TG3_NVRAM_SIZE_128KB);
10613                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10614                                 tp->nvram_size = (protect ?
10615                                                   TG3_NVRAM_SIZE_64KB :
10616                                                   TG3_NVRAM_SIZE_256KB);
10617                         else
10618                                 tp->nvram_size = (protect ?
10619                                                   TG3_NVRAM_SIZE_128KB :
10620                                                   TG3_NVRAM_SIZE_512KB);
10621                         break;
10622         }
10623 }
10624
10625 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10626 {
10627         u32 nvcfg1;
10628
10629         nvcfg1 = tr32(NVRAM_CFG1);
10630
10631         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10632                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10633                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10634                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10635                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10636                         tp->nvram_jedecnum = JEDEC_ATMEL;
10637                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10638                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10639
10640                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10641                         tw32(NVRAM_CFG1, nvcfg1);
10642                         break;
10643                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10644                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10645                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10646                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10647                         tp->nvram_jedecnum = JEDEC_ATMEL;
10648                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10649                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10650                         tp->nvram_pagesize = 264;
10651                         break;
10652                 case FLASH_5752VENDOR_ST_M45PE10:
10653                 case FLASH_5752VENDOR_ST_M45PE20:
10654                 case FLASH_5752VENDOR_ST_M45PE40:
10655                         tp->nvram_jedecnum = JEDEC_ST;
10656                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10657                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10658                         tp->nvram_pagesize = 256;
10659                         break;
10660         }
10661 }
10662
10663 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10664 {
10665         u32 nvcfg1, protect = 0;
10666
10667         nvcfg1 = tr32(NVRAM_CFG1);
10668
10669         /* NVRAM protection for TPM */
10670         if (nvcfg1 & (1 << 27)) {
10671                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10672                 protect = 1;
10673         }
10674
10675         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10676         switch (nvcfg1) {
10677                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10678                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10679                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10680                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10681                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10682                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10683                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10684                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10685                         tp->nvram_jedecnum = JEDEC_ATMEL;
10686                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10687                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10688                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10689                         tp->nvram_pagesize = 256;
10690                         break;
10691                 case FLASH_5761VENDOR_ST_A_M45PE20:
10692                 case FLASH_5761VENDOR_ST_A_M45PE40:
10693                 case FLASH_5761VENDOR_ST_A_M45PE80:
10694                 case FLASH_5761VENDOR_ST_A_M45PE16:
10695                 case FLASH_5761VENDOR_ST_M_M45PE20:
10696                 case FLASH_5761VENDOR_ST_M_M45PE40:
10697                 case FLASH_5761VENDOR_ST_M_M45PE80:
10698                 case FLASH_5761VENDOR_ST_M_M45PE16:
10699                         tp->nvram_jedecnum = JEDEC_ST;
10700                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10701                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10702                         tp->nvram_pagesize = 256;
10703                         break;
10704         }
10705
10706         if (protect) {
10707                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10708         } else {
10709                 switch (nvcfg1) {
10710                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10711                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10712                         case FLASH_5761VENDOR_ST_A_M45PE16:
10713                         case FLASH_5761VENDOR_ST_M_M45PE16:
10714                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10715                                 break;
10716                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10717                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10718                         case FLASH_5761VENDOR_ST_A_M45PE80:
10719                         case FLASH_5761VENDOR_ST_M_M45PE80:
10720                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10721                                 break;
10722                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10723                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10724                         case FLASH_5761VENDOR_ST_A_M45PE40:
10725                         case FLASH_5761VENDOR_ST_M_M45PE40:
10726                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10727                                 break;
10728                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10729                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10730                         case FLASH_5761VENDOR_ST_A_M45PE20:
10731                         case FLASH_5761VENDOR_ST_M_M45PE20:
10732                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10733                                 break;
10734                 }
10735         }
10736 }
10737
10738 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10739 {
10740         tp->nvram_jedecnum = JEDEC_ATMEL;
10741         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10742         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10743 }
10744
10745 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10746 static void __devinit tg3_nvram_init(struct tg3 *tp)
10747 {
10748         tw32_f(GRC_EEPROM_ADDR,
10749              (EEPROM_ADDR_FSM_RESET |
10750               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10751                EEPROM_ADDR_CLKPERD_SHIFT)));
10752
10753         msleep(1);
10754
10755         /* Enable seeprom accesses. */
10756         tw32_f(GRC_LOCAL_CTRL,
10757              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10758         udelay(100);
10759
10760         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10761             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10762                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10763
10764                 if (tg3_nvram_lock(tp)) {
10765                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10766                                "tg3_nvram_init failed.\n", tp->dev->name);
10767                         return;
10768                 }
10769                 tg3_enable_nvram_access(tp);
10770
10771                 tp->nvram_size = 0;
10772
10773                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10774                         tg3_get_5752_nvram_info(tp);
10775                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10776                         tg3_get_5755_nvram_info(tp);
10777                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10778                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10779                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10780                         tg3_get_5787_nvram_info(tp);
10781                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10782                         tg3_get_5761_nvram_info(tp);
10783                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10784                         tg3_get_5906_nvram_info(tp);
10785                 else
10786                         tg3_get_nvram_info(tp);
10787
10788                 if (tp->nvram_size == 0)
10789                         tg3_get_nvram_size(tp);
10790
10791                 tg3_disable_nvram_access(tp);
10792                 tg3_nvram_unlock(tp);
10793
10794         } else {
10795                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10796
10797                 tg3_get_eeprom_size(tp);
10798         }
10799 }
10800
10801 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10802                                         u32 offset, u32 *val)
10803 {
10804         u32 tmp;
10805         int i;
10806
10807         if (offset > EEPROM_ADDR_ADDR_MASK ||
10808             (offset % 4) != 0)
10809                 return -EINVAL;
10810
10811         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10812                                         EEPROM_ADDR_DEVID_MASK |
10813                                         EEPROM_ADDR_READ);
10814         tw32(GRC_EEPROM_ADDR,
10815              tmp |
10816              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10817              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10818               EEPROM_ADDR_ADDR_MASK) |
10819              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10820
10821         for (i = 0; i < 1000; i++) {
10822                 tmp = tr32(GRC_EEPROM_ADDR);
10823
10824                 if (tmp & EEPROM_ADDR_COMPLETE)
10825                         break;
10826                 msleep(1);
10827         }
10828         if (!(tmp & EEPROM_ADDR_COMPLETE))
10829                 return -EBUSY;
10830
10831         *val = tr32(GRC_EEPROM_DATA);
10832         return 0;
10833 }
10834
10835 #define NVRAM_CMD_TIMEOUT 10000
10836
10837 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10838 {
10839         int i;
10840
10841         tw32(NVRAM_CMD, nvram_cmd);
10842         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10843                 udelay(10);
10844                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10845                         udelay(10);
10846                         break;
10847                 }
10848         }
10849         if (i == NVRAM_CMD_TIMEOUT) {
10850                 return -EBUSY;
10851         }
10852         return 0;
10853 }
10854
10855 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10856 {
10857         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10858             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10859             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10860            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10861             (tp->nvram_jedecnum == JEDEC_ATMEL))
10862
10863                 addr = ((addr / tp->nvram_pagesize) <<
10864                         ATMEL_AT45DB0X1B_PAGE_POS) +
10865                        (addr % tp->nvram_pagesize);
10866
10867         return addr;
10868 }
10869
10870 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10871 {
10872         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10873             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10874             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10875            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10876             (tp->nvram_jedecnum == JEDEC_ATMEL))
10877
10878                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10879                         tp->nvram_pagesize) +
10880                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10881
10882         return addr;
10883 }
10884
10885 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10886 {
10887         int ret;
10888
10889         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10890                 return tg3_nvram_read_using_eeprom(tp, offset, val);
10891
10892         offset = tg3_nvram_phys_addr(tp, offset);
10893
10894         if (offset > NVRAM_ADDR_MSK)
10895                 return -EINVAL;
10896
10897         ret = tg3_nvram_lock(tp);
10898         if (ret)
10899                 return ret;
10900
10901         tg3_enable_nvram_access(tp);
10902
10903         tw32(NVRAM_ADDR, offset);
10904         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10905                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10906
10907         if (ret == 0)
10908                 *val = swab32(tr32(NVRAM_RDDATA));
10909
10910         tg3_disable_nvram_access(tp);
10911
10912         tg3_nvram_unlock(tp);
10913
10914         return ret;
10915 }
10916
10917 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10918 {
10919         u32 v;
10920         int res = tg3_nvram_read(tp, offset, &v);
10921         if (!res)
10922                 *val = cpu_to_le32(v);
10923         return res;
10924 }
10925
10926 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10927 {
10928         int err;
10929         u32 tmp;
10930
10931         err = tg3_nvram_read(tp, offset, &tmp);
10932         *val = swab32(tmp);
10933         return err;
10934 }
10935
10936 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10937                                     u32 offset, u32 len, u8 *buf)
10938 {
10939         int i, j, rc = 0;
10940         u32 val;
10941
10942         for (i = 0; i < len; i += 4) {
10943                 u32 addr;
10944                 __le32 data;
10945
10946                 addr = offset + i;
10947
10948                 memcpy(&data, buf + i, 4);
10949
10950                 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10951
10952                 val = tr32(GRC_EEPROM_ADDR);
10953                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10954
10955                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10956                         EEPROM_ADDR_READ);
10957                 tw32(GRC_EEPROM_ADDR, val |
10958                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
10959                         (addr & EEPROM_ADDR_ADDR_MASK) |
10960                         EEPROM_ADDR_START |
10961                         EEPROM_ADDR_WRITE);
10962
10963                 for (j = 0; j < 1000; j++) {
10964                         val = tr32(GRC_EEPROM_ADDR);
10965
10966                         if (val & EEPROM_ADDR_COMPLETE)
10967                                 break;
10968                         msleep(1);
10969                 }
10970                 if (!(val & EEPROM_ADDR_COMPLETE)) {
10971                         rc = -EBUSY;
10972                         break;
10973                 }
10974         }
10975
10976         return rc;
10977 }
10978
10979 /* offset and length are dword aligned */
10980 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10981                 u8 *buf)
10982 {
10983         int ret = 0;
10984         u32 pagesize = tp->nvram_pagesize;
10985         u32 pagemask = pagesize - 1;
10986         u32 nvram_cmd;
10987         u8 *tmp;
10988
10989         tmp = kmalloc(pagesize, GFP_KERNEL);
10990         if (tmp == NULL)
10991                 return -ENOMEM;
10992
10993         while (len) {
10994                 int j;
10995                 u32 phy_addr, page_off, size;
10996
10997                 phy_addr = offset & ~pagemask;
10998
10999                 for (j = 0; j < pagesize; j += 4) {
11000                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11001                                                 (__le32 *) (tmp + j))))
11002                                 break;
11003                 }
11004                 if (ret)
11005                         break;
11006
11007                 page_off = offset & pagemask;
11008                 size = pagesize;
11009                 if (len < size)
11010                         size = len;
11011
11012                 len -= size;
11013
11014                 memcpy(tmp + page_off, buf, size);
11015
11016                 offset = offset + (pagesize - page_off);
11017
11018                 tg3_enable_nvram_access(tp);
11019
11020                 /*
11021                  * Before we can erase the flash page, we need
11022                  * to issue a special "write enable" command.
11023                  */
11024                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11025
11026                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11027                         break;
11028
11029                 /* Erase the target page */
11030                 tw32(NVRAM_ADDR, phy_addr);
11031
11032                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11033                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11034
11035                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11036                         break;
11037
11038                 /* Issue another write enable to start the write. */
11039                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11040
11041                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11042                         break;
11043
11044                 for (j = 0; j < pagesize; j += 4) {
11045                         __be32 data;
11046
11047                         data = *((__be32 *) (tmp + j));
11048                         /* swab32(le32_to_cpu(data)), actually */
11049                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
11050
11051                         tw32(NVRAM_ADDR, phy_addr + j);
11052
11053                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11054                                 NVRAM_CMD_WR;
11055
11056                         if (j == 0)
11057                                 nvram_cmd |= NVRAM_CMD_FIRST;
11058                         else if (j == (pagesize - 4))
11059                                 nvram_cmd |= NVRAM_CMD_LAST;
11060
11061                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11062                                 break;
11063                 }
11064                 if (ret)
11065                         break;
11066         }
11067
11068         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11069         tg3_nvram_exec_cmd(tp, nvram_cmd);
11070
11071         kfree(tmp);
11072
11073         return ret;
11074 }
11075
11076 /* offset and length are dword aligned */
11077 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11078                 u8 *buf)
11079 {
11080         int i, ret = 0;
11081
11082         for (i = 0; i < len; i += 4, offset += 4) {
11083                 u32 page_off, phy_addr, nvram_cmd;
11084                 __be32 data;
11085
11086                 memcpy(&data, buf + i, 4);
11087                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11088
11089                 page_off = offset % tp->nvram_pagesize;
11090
11091                 phy_addr = tg3_nvram_phys_addr(tp, offset);
11092
11093                 tw32(NVRAM_ADDR, phy_addr);
11094
11095                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11096
11097                 if ((page_off == 0) || (i == 0))
11098                         nvram_cmd |= NVRAM_CMD_FIRST;
11099                 if (page_off == (tp->nvram_pagesize - 4))
11100                         nvram_cmd |= NVRAM_CMD_LAST;
11101
11102                 if (i == (len - 4))
11103                         nvram_cmd |= NVRAM_CMD_LAST;
11104
11105                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11106                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11107                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11108                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11109                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11110                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11111                     (tp->nvram_jedecnum == JEDEC_ST) &&
11112                     (nvram_cmd & NVRAM_CMD_FIRST)) {
11113
11114                         if ((ret = tg3_nvram_exec_cmd(tp,
11115                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11116                                 NVRAM_CMD_DONE)))
11117
11118                                 break;
11119                 }
11120                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11121                         /* We always do complete word writes to eeprom. */
11122                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11123                 }
11124
11125                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11126                         break;
11127         }
11128         return ret;
11129 }
11130
11131 /* offset and length are dword aligned */
11132 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11133 {
11134         int ret;
11135
11136         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11137                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11138                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11139                 udelay(40);
11140         }
11141
11142         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11143                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11144         }
11145         else {
11146                 u32 grc_mode;
11147
11148                 ret = tg3_nvram_lock(tp);
11149                 if (ret)
11150                         return ret;
11151
11152                 tg3_enable_nvram_access(tp);
11153                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11154                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11155                         tw32(NVRAM_WRITE1, 0x406);
11156
11157                 grc_mode = tr32(GRC_MODE);
11158                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11159
11160                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11161                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11162
11163                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11164                                 buf);
11165                 }
11166                 else {
11167                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11168                                 buf);
11169                 }
11170
11171                 grc_mode = tr32(GRC_MODE);
11172                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11173
11174                 tg3_disable_nvram_access(tp);
11175                 tg3_nvram_unlock(tp);
11176         }
11177
11178         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11179                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11180                 udelay(40);
11181         }
11182
11183         return ret;
11184 }
11185
11186 struct subsys_tbl_ent {
11187         u16 subsys_vendor, subsys_devid;
11188         u32 phy_id;
11189 };
11190
11191 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11192         /* Broadcom boards. */
11193         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11194         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11195         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11196         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
11197         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11198         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11199         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
11200         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11201         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11202         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11203         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11204
11205         /* 3com boards. */
11206         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11207         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11208         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
11209         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11210         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11211
11212         /* DELL boards. */
11213         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11214         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11215         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11216         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11217
11218         /* Compaq boards. */
11219         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11220         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11221         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
11222         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11223         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11224
11225         /* IBM boards. */
11226         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11227 };
11228
11229 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11230 {
11231         int i;
11232
11233         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11234                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11235                      tp->pdev->subsystem_vendor) &&
11236                     (subsys_id_to_phy_id[i].subsys_devid ==
11237                      tp->pdev->subsystem_device))
11238                         return &subsys_id_to_phy_id[i];
11239         }
11240         return NULL;
11241 }
11242
11243 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11244 {
11245         u32 val;
11246         u16 pmcsr;
11247
11248         /* On some early chips the SRAM cannot be accessed in D3hot state,
11249          * so need make sure we're in D0.
11250          */
11251         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11252         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11253         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11254         msleep(1);
11255
11256         /* Make sure register accesses (indirect or otherwise)
11257          * will function correctly.
11258          */
11259         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11260                                tp->misc_host_ctrl);
11261
11262         /* The memory arbiter has to be enabled in order for SRAM accesses
11263          * to succeed.  Normally on powerup the tg3 chip firmware will make
11264          * sure it is enabled, but other entities such as system netboot
11265          * code might disable it.
11266          */
11267         val = tr32(MEMARB_MODE);
11268         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11269
11270         tp->phy_id = PHY_ID_INVALID;
11271         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11272
11273         /* Assume an onboard device and WOL capable by default.  */
11274         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11275
11276         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11277                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11278                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11279                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11280                 }
11281                 val = tr32(VCPU_CFGSHDW);
11282                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11283                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11284                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11285                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
11286                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11287                 return;
11288         }
11289
11290         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11291         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11292                 u32 nic_cfg, led_cfg;
11293                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11294                 int eeprom_phy_serdes = 0;
11295
11296                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11297                 tp->nic_sram_data_cfg = nic_cfg;
11298
11299                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11300                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11301                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11302                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11303                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11304                     (ver > 0) && (ver < 0x100))
11305                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11306
11307                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11308                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11309
11310                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11311                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11312                         eeprom_phy_serdes = 1;
11313
11314                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11315                 if (nic_phy_id != 0) {
11316                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11317                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11318
11319                         eeprom_phy_id  = (id1 >> 16) << 10;
11320                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
11321                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
11322                 } else
11323                         eeprom_phy_id = 0;
11324
11325                 tp->phy_id = eeprom_phy_id;
11326                 if (eeprom_phy_serdes) {
11327                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11328                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11329                         else
11330                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11331                 }
11332
11333                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11334                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11335                                     SHASTA_EXT_LED_MODE_MASK);
11336                 else
11337                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11338
11339                 switch (led_cfg) {
11340                 default:
11341                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11342                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11343                         break;
11344
11345                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11346                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11347                         break;
11348
11349                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11350                         tp->led_ctrl = LED_CTRL_MODE_MAC;
11351
11352                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11353                          * read on some older 5700/5701 bootcode.
11354                          */
11355                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11356                             ASIC_REV_5700 ||
11357                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
11358                             ASIC_REV_5701)
11359                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11360
11361                         break;
11362
11363                 case SHASTA_EXT_LED_SHARED:
11364                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
11365                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11366                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11367                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11368                                                  LED_CTRL_MODE_PHY_2);
11369                         break;
11370
11371                 case SHASTA_EXT_LED_MAC:
11372                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11373                         break;
11374
11375                 case SHASTA_EXT_LED_COMBO:
11376                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
11377                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11378                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11379                                                  LED_CTRL_MODE_PHY_2);
11380                         break;
11381
11382                 }
11383
11384                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11385                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11386                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11387                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11388
11389                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11390                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11391
11392                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11393                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11394                         if ((tp->pdev->subsystem_vendor ==
11395                              PCI_VENDOR_ID_ARIMA) &&
11396                             (tp->pdev->subsystem_device == 0x205a ||
11397                              tp->pdev->subsystem_device == 0x2063))
11398                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11399                 } else {
11400                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11401                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11402                 }
11403
11404                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11405                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11406                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11407                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11408                 }
11409                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
11410                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11411                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11412                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11413                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11414
11415                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
11416                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
11417                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11418
11419                 if (cfg2 & (1 << 17))
11420                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11421
11422                 /* serdes signal pre-emphasis in register 0x590 set by */
11423                 /* bootcode if bit 18 is set */
11424                 if (cfg2 & (1 << 18))
11425                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11426
11427                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11428                         u32 cfg3;
11429
11430                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11431                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11432                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11433                 }
11434
11435                 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11436                         tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11437                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11438                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11439                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11440                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11441         }
11442 }
11443
11444 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11445 {
11446         int i;
11447         u32 val;
11448
11449         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11450         tw32(OTP_CTRL, cmd);
11451
11452         /* Wait for up to 1 ms for command to execute. */
11453         for (i = 0; i < 100; i++) {
11454                 val = tr32(OTP_STATUS);
11455                 if (val & OTP_STATUS_CMD_DONE)
11456                         break;
11457                 udelay(10);
11458         }
11459
11460         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11461 }
11462
11463 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11464  * configuration is a 32-bit value that straddles the alignment boundary.
11465  * We do two 32-bit reads and then shift and merge the results.
11466  */
11467 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11468 {
11469         u32 bhalf_otp, thalf_otp;
11470
11471         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11472
11473         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11474                 return 0;
11475
11476         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11477
11478         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11479                 return 0;
11480
11481         thalf_otp = tr32(OTP_READ_DATA);
11482
11483         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11484
11485         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11486                 return 0;
11487
11488         bhalf_otp = tr32(OTP_READ_DATA);
11489
11490         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11491 }
11492
11493 static int __devinit tg3_phy_probe(struct tg3 *tp)
11494 {
11495         u32 hw_phy_id_1, hw_phy_id_2;
11496         u32 hw_phy_id, hw_phy_id_masked;
11497         int err;
11498
11499         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11500                 return tg3_phy_init(tp);
11501
11502         /* Reading the PHY ID register can conflict with ASF
11503          * firwmare access to the PHY hardware.
11504          */
11505         err = 0;
11506         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11507             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11508                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11509         } else {
11510                 /* Now read the physical PHY_ID from the chip and verify
11511                  * that it is sane.  If it doesn't look good, we fall back
11512                  * to either the hard-coded table based PHY_ID and failing
11513                  * that the value found in the eeprom area.
11514                  */
11515                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11516                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11517
11518                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
11519                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11520                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
11521
11522                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11523         }
11524
11525         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11526                 tp->phy_id = hw_phy_id;
11527                 if (hw_phy_id_masked == PHY_ID_BCM8002)
11528                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11529                 else
11530                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11531         } else {
11532                 if (tp->phy_id != PHY_ID_INVALID) {
11533                         /* Do nothing, phy ID already set up in
11534                          * tg3_get_eeprom_hw_cfg().
11535                          */
11536                 } else {
11537                         struct subsys_tbl_ent *p;
11538
11539                         /* No eeprom signature?  Try the hardcoded
11540                          * subsys device table.
11541                          */
11542                         p = lookup_by_subsys(tp);
11543                         if (!p)
11544                                 return -ENODEV;
11545
11546                         tp->phy_id = p->phy_id;
11547                         if (!tp->phy_id ||
11548                             tp->phy_id == PHY_ID_BCM8002)
11549                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11550                 }
11551         }
11552
11553         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11554             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11555             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11556                 u32 bmsr, adv_reg, tg3_ctrl, mask;
11557
11558                 tg3_readphy(tp, MII_BMSR, &bmsr);
11559                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11560                     (bmsr & BMSR_LSTATUS))
11561                         goto skip_phy_reset;
11562
11563                 err = tg3_phy_reset(tp);
11564                 if (err)
11565                         return err;
11566
11567                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11568                            ADVERTISE_100HALF | ADVERTISE_100FULL |
11569                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11570                 tg3_ctrl = 0;
11571                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11572                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11573                                     MII_TG3_CTRL_ADV_1000_FULL);
11574                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11575                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11576                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11577                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
11578                 }
11579
11580                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11581                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11582                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11583                 if (!tg3_copper_is_advertising_all(tp, mask)) {
11584                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11585
11586                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11587                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11588
11589                         tg3_writephy(tp, MII_BMCR,
11590                                      BMCR_ANENABLE | BMCR_ANRESTART);
11591                 }
11592                 tg3_phy_set_wirespeed(tp);
11593
11594                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11595                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11596                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11597         }
11598
11599 skip_phy_reset:
11600         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11601                 err = tg3_init_5401phy_dsp(tp);
11602                 if (err)
11603                         return err;
11604         }
11605
11606         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11607                 err = tg3_init_5401phy_dsp(tp);
11608         }
11609
11610         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11611                 tp->link_config.advertising =
11612                         (ADVERTISED_1000baseT_Half |
11613                          ADVERTISED_1000baseT_Full |
11614                          ADVERTISED_Autoneg |
11615                          ADVERTISED_FIBRE);
11616         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11617                 tp->link_config.advertising &=
11618                         ~(ADVERTISED_1000baseT_Half |
11619                           ADVERTISED_1000baseT_Full);
11620
11621         return err;
11622 }
11623
11624 static void __devinit tg3_read_partno(struct tg3 *tp)
11625 {
11626         unsigned char vpd_data[256];
11627         unsigned int i;
11628         u32 magic;
11629
11630         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11631                 goto out_not_found;
11632
11633         if (magic == TG3_EEPROM_MAGIC) {
11634                 for (i = 0; i < 256; i += 4) {
11635                         u32 tmp;
11636
11637                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11638                                 goto out_not_found;
11639
11640                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11641                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11642                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11643                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11644                 }
11645         } else {
11646                 int vpd_cap;
11647
11648                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11649                 for (i = 0; i < 256; i += 4) {
11650                         u32 tmp, j = 0;
11651                         __le32 v;
11652                         u16 tmp16;
11653
11654                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11655                                               i);
11656                         while (j++ < 100) {
11657                                 pci_read_config_word(tp->pdev, vpd_cap +
11658                                                      PCI_VPD_ADDR, &tmp16);
11659                                 if (tmp16 & 0x8000)
11660                                         break;
11661                                 msleep(1);
11662                         }
11663                         if (!(tmp16 & 0x8000))
11664                                 goto out_not_found;
11665
11666                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11667                                               &tmp);
11668                         v = cpu_to_le32(tmp);
11669                         memcpy(&vpd_data[i], &v, 4);
11670                 }
11671         }
11672
11673         /* Now parse and find the part number. */
11674         for (i = 0; i < 254; ) {
11675                 unsigned char val = vpd_data[i];
11676                 unsigned int block_end;
11677
11678                 if (val == 0x82 || val == 0x91) {
11679                         i = (i + 3 +
11680                              (vpd_data[i + 1] +
11681                               (vpd_data[i + 2] << 8)));
11682                         continue;
11683                 }
11684
11685                 if (val != 0x90)
11686                         goto out_not_found;
11687
11688                 block_end = (i + 3 +
11689                              (vpd_data[i + 1] +
11690                               (vpd_data[i + 2] << 8)));
11691                 i += 3;
11692
11693                 if (block_end > 256)
11694                         goto out_not_found;
11695
11696                 while (i < (block_end - 2)) {
11697                         if (vpd_data[i + 0] == 'P' &&
11698                             vpd_data[i + 1] == 'N') {
11699                                 int partno_len = vpd_data[i + 2];
11700
11701                                 i += 3;
11702                                 if (partno_len > 24 || (partno_len + i) > 256)
11703                                         goto out_not_found;
11704
11705                                 memcpy(tp->board_part_number,
11706                                        &vpd_data[i], partno_len);
11707
11708                                 /* Success. */
11709                                 return;
11710                         }
11711                         i += 3 + vpd_data[i + 2];
11712                 }
11713
11714                 /* Part number not found. */
11715                 goto out_not_found;
11716         }
11717
11718 out_not_found:
11719         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11720                 strcpy(tp->board_part_number, "BCM95906");
11721         else
11722                 strcpy(tp->board_part_number, "none");
11723 }
11724
11725 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11726 {
11727         u32 val;
11728
11729         if (tg3_nvram_read_swab(tp, offset, &val) ||
11730             (val & 0xfc000000) != 0x0c000000 ||
11731             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11732             val != 0)
11733                 return 0;
11734
11735         return 1;
11736 }
11737
11738 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11739 {
11740         u32 val, offset, start;
11741         u32 ver_offset;
11742         int i, bcnt;
11743
11744         if (tg3_nvram_read_swab(tp, 0, &val))
11745                 return;
11746
11747         if (val != TG3_EEPROM_MAGIC)
11748                 return;
11749
11750         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11751             tg3_nvram_read_swab(tp, 0x4, &start))
11752                 return;
11753
11754         offset = tg3_nvram_logical_addr(tp, offset);
11755
11756         if (!tg3_fw_img_is_valid(tp, offset) ||
11757             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11758                 return;
11759
11760         offset = offset + ver_offset - start;
11761         for (i = 0; i < 16; i += 4) {
11762                 __le32 v;
11763                 if (tg3_nvram_read_le(tp, offset + i, &v))
11764                         return;
11765
11766                 memcpy(tp->fw_ver + i, &v, 4);
11767         }
11768
11769         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11770              (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11771                 return;
11772
11773         for (offset = TG3_NVM_DIR_START;
11774              offset < TG3_NVM_DIR_END;
11775              offset += TG3_NVM_DIRENT_SIZE) {
11776                 if (tg3_nvram_read_swab(tp, offset, &val))
11777                         return;
11778
11779                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11780                         break;
11781         }
11782
11783         if (offset == TG3_NVM_DIR_END)
11784                 return;
11785
11786         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11787                 start = 0x08000000;
11788         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11789                 return;
11790
11791         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11792             !tg3_fw_img_is_valid(tp, offset) ||
11793             tg3_nvram_read_swab(tp, offset + 8, &val))
11794                 return;
11795
11796         offset += val - start;
11797
11798         bcnt = strlen(tp->fw_ver);
11799
11800         tp->fw_ver[bcnt++] = ',';
11801         tp->fw_ver[bcnt++] = ' ';
11802
11803         for (i = 0; i < 4; i++) {
11804                 __le32 v;
11805                 if (tg3_nvram_read_le(tp, offset, &v))
11806                         return;
11807
11808                 offset += sizeof(v);
11809
11810                 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11811                         memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11812                         break;
11813                 }
11814
11815                 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11816                 bcnt += sizeof(v);
11817         }
11818
11819         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11820 }
11821
11822 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11823
11824 static int __devinit tg3_get_invariants(struct tg3 *tp)
11825 {
11826         static struct pci_device_id write_reorder_chipsets[] = {
11827                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11828                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11829                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11830                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11831                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11832                              PCI_DEVICE_ID_VIA_8385_0) },
11833                 { },
11834         };
11835         u32 misc_ctrl_reg;
11836         u32 cacheline_sz_reg;
11837         u32 pci_state_reg, grc_misc_cfg;
11838         u32 val;
11839         u16 pci_cmd;
11840         int err, pcie_cap;
11841
11842         /* Force memory write invalidate off.  If we leave it on,
11843          * then on 5700_BX chips we have to enable a workaround.
11844          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11845          * to match the cacheline size.  The Broadcom driver have this
11846          * workaround but turns MWI off all the times so never uses
11847          * it.  This seems to suggest that the workaround is insufficient.
11848          */
11849         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11850         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11851         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11852
11853         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11854          * has the register indirect write enable bit set before
11855          * we try to access any of the MMIO registers.  It is also
11856          * critical that the PCI-X hw workaround situation is decided
11857          * before that as well.
11858          */
11859         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11860                               &misc_ctrl_reg);
11861
11862         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11863                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11864         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11865                 u32 prod_id_asic_rev;
11866
11867                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11868                                       &prod_id_asic_rev);
11869                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11870         }
11871
11872         /* Wrong chip ID in 5752 A0. This code can be removed later
11873          * as A0 is not in production.
11874          */
11875         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11876                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11877
11878         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11879          * we need to disable memory and use config. cycles
11880          * only to access all registers. The 5702/03 chips
11881          * can mistakenly decode the special cycles from the
11882          * ICH chipsets as memory write cycles, causing corruption
11883          * of register and memory space. Only certain ICH bridges
11884          * will drive special cycles with non-zero data during the
11885          * address phase which can fall within the 5703's address
11886          * range. This is not an ICH bug as the PCI spec allows
11887          * non-zero address during special cycles. However, only
11888          * these ICH bridges are known to drive non-zero addresses
11889          * during special cycles.
11890          *
11891          * Since special cycles do not cross PCI bridges, we only
11892          * enable this workaround if the 5703 is on the secondary
11893          * bus of these ICH bridges.
11894          */
11895         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11896             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11897                 static struct tg3_dev_id {
11898                         u32     vendor;
11899                         u32     device;
11900                         u32     rev;
11901                 } ich_chipsets[] = {
11902                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11903                           PCI_ANY_ID },
11904                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11905                           PCI_ANY_ID },
11906                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11907                           0xa },
11908                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11909                           PCI_ANY_ID },
11910                         { },
11911                 };
11912                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11913                 struct pci_dev *bridge = NULL;
11914
11915                 while (pci_id->vendor != 0) {
11916                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11917                                                 bridge);
11918                         if (!bridge) {
11919                                 pci_id++;
11920                                 continue;
11921                         }
11922                         if (pci_id->rev != PCI_ANY_ID) {
11923                                 if (bridge->revision > pci_id->rev)
11924                                         continue;
11925                         }
11926                         if (bridge->subordinate &&
11927                             (bridge->subordinate->number ==
11928                              tp->pdev->bus->number)) {
11929
11930                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11931                                 pci_dev_put(bridge);
11932                                 break;
11933                         }
11934                 }
11935         }
11936
11937         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11938                 static struct tg3_dev_id {
11939                         u32     vendor;
11940                         u32     device;
11941                 } bridge_chipsets[] = {
11942                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11943                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11944                         { },
11945                 };
11946                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11947                 struct pci_dev *bridge = NULL;
11948
11949                 while (pci_id->vendor != 0) {
11950                         bridge = pci_get_device(pci_id->vendor,
11951                                                 pci_id->device,
11952                                                 bridge);
11953                         if (!bridge) {
11954                                 pci_id++;
11955                                 continue;
11956                         }
11957                         if (bridge->subordinate &&
11958                             (bridge->subordinate->number <=
11959                              tp->pdev->bus->number) &&
11960                             (bridge->subordinate->subordinate >=
11961                              tp->pdev->bus->number)) {
11962                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11963                                 pci_dev_put(bridge);
11964                                 break;
11965                         }
11966                 }
11967         }
11968
11969         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11970          * DMA addresses > 40-bit. This bridge may have other additional
11971          * 57xx devices behind it in some 4-port NIC designs for example.
11972          * Any tg3 device found behind the bridge will also need the 40-bit
11973          * DMA workaround.
11974          */
11975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11977                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11978                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11979                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11980         }
11981         else {
11982                 struct pci_dev *bridge = NULL;
11983
11984                 do {
11985                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11986                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11987                                                 bridge);
11988                         if (bridge && bridge->subordinate &&
11989                             (bridge->subordinate->number <=
11990                              tp->pdev->bus->number) &&
11991                             (bridge->subordinate->subordinate >=
11992                              tp->pdev->bus->number)) {
11993                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11994                                 pci_dev_put(bridge);
11995                                 break;
11996                         }
11997                 } while (bridge);
11998         }
11999
12000         /* Initialize misc host control in PCI block. */
12001         tp->misc_host_ctrl |= (misc_ctrl_reg &
12002                                MISC_HOST_CTRL_CHIPREV);
12003         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12004                                tp->misc_host_ctrl);
12005
12006         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12007                               &cacheline_sz_reg);
12008
12009         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12010         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12011         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12012         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12013
12014         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12015             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12016                 tp->pdev_peer = tg3_find_peer(tp);
12017
12018         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12019             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12020             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12021             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12022             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12023             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12024             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12025             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12026             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12027                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12028
12029         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12030             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12031                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12032
12033         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12034                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12035                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12036                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12037                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12038                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12039                      tp->pdev_peer == tp->pdev))
12040                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12041
12042                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12043                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12044                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12045                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12046                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12047                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12048                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12049                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12050                 } else {
12051                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12052                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12053                                 ASIC_REV_5750 &&
12054                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12055                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12056                 }
12057         }
12058
12059         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12060              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12061                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12062
12063         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12064         if (pcie_cap != 0) {
12065                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12066
12067                 pcie_set_readrq(tp->pdev, 4096);
12068
12069                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12070                         u16 lnkctl;
12071
12072                         pci_read_config_word(tp->pdev,
12073                                              pcie_cap + PCI_EXP_LNKCTL,
12074                                              &lnkctl);
12075                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12076                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12077                 }
12078         }
12079
12080         /* If we have an AMD 762 or VIA K8T800 chipset, write
12081          * reordering to the mailbox registers done by the host
12082          * controller can cause major troubles.  We read back from
12083          * every mailbox register write to force the writes to be
12084          * posted to the chip in order.
12085          */
12086         if (pci_dev_present(write_reorder_chipsets) &&
12087             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12088                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12089
12090         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12091             tp->pci_lat_timer < 64) {
12092                 tp->pci_lat_timer = 64;
12093
12094                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12095                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12096                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12097                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12098
12099                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12100                                        cacheline_sz_reg);
12101         }
12102
12103         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12104             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12105                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12106                 if (!tp->pcix_cap) {
12107                         printk(KERN_ERR PFX "Cannot find PCI-X "
12108                                             "capability, aborting.\n");
12109                         return -EIO;
12110                 }
12111         }
12112
12113         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12114                               &pci_state_reg);
12115
12116         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12117                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12118
12119                 /* If this is a 5700 BX chipset, and we are in PCI-X
12120                  * mode, enable register write workaround.
12121                  *
12122                  * The workaround is to use indirect register accesses
12123                  * for all chip writes not to mailbox registers.
12124                  */
12125                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12126                         u32 pm_reg;
12127
12128                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12129
12130                         /* The chip can have it's power management PCI config
12131                          * space registers clobbered due to this bug.
12132                          * So explicitly force the chip into D0 here.
12133                          */
12134                         pci_read_config_dword(tp->pdev,
12135                                               tp->pm_cap + PCI_PM_CTRL,
12136                                               &pm_reg);
12137                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12138                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12139                         pci_write_config_dword(tp->pdev,
12140                                                tp->pm_cap + PCI_PM_CTRL,
12141                                                pm_reg);
12142
12143                         /* Also, force SERR#/PERR# in PCI command. */
12144                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12145                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12146                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12147                 }
12148         }
12149
12150         /* 5700 BX chips need to have their TX producer index mailboxes
12151          * written twice to workaround a bug.
12152          */
12153         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12154                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12155
12156         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12157                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12158         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12159                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12160
12161         /* Chip-specific fixup from Broadcom driver */
12162         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12163             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12164                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12165                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12166         }
12167
12168         /* Default fast path register access methods */
12169         tp->read32 = tg3_read32;
12170         tp->write32 = tg3_write32;
12171         tp->read32_mbox = tg3_read32;
12172         tp->write32_mbox = tg3_write32;
12173         tp->write32_tx_mbox = tg3_write32;
12174         tp->write32_rx_mbox = tg3_write32;
12175
12176         /* Various workaround register access methods */
12177         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12178                 tp->write32 = tg3_write_indirect_reg32;
12179         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12180                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12181                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12182                 /*
12183                  * Back to back register writes can cause problems on these
12184                  * chips, the workaround is to read back all reg writes
12185                  * except those to mailbox regs.
12186                  *
12187                  * See tg3_write_indirect_reg32().
12188                  */
12189                 tp->write32 = tg3_write_flush_reg32;
12190         }
12191
12192
12193         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12194             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12195                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12196                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12197                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12198         }
12199
12200         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12201                 tp->read32 = tg3_read_indirect_reg32;
12202                 tp->write32 = tg3_write_indirect_reg32;
12203                 tp->read32_mbox = tg3_read_indirect_mbox;
12204                 tp->write32_mbox = tg3_write_indirect_mbox;
12205                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12206                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12207
12208                 iounmap(tp->regs);
12209                 tp->regs = NULL;
12210
12211                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12212                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12213                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12214         }
12215         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12216                 tp->read32_mbox = tg3_read32_mbox_5906;
12217                 tp->write32_mbox = tg3_write32_mbox_5906;
12218                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12219                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12220         }
12221
12222         if (tp->write32 == tg3_write_indirect_reg32 ||
12223             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12224              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12225               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12226                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12227
12228         /* Get eeprom hw config before calling tg3_set_power_state().
12229          * In particular, the TG3_FLG2_IS_NIC flag must be
12230          * determined before calling tg3_set_power_state() so that
12231          * we know whether or not to switch out of Vaux power.
12232          * When the flag is set, it means that GPIO1 is used for eeprom
12233          * write protect and also implies that it is a LOM where GPIOs
12234          * are not used to switch power.
12235          */
12236         tg3_get_eeprom_hw_cfg(tp);
12237
12238         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12239                 /* Allow reads and writes to the
12240                  * APE register and memory space.
12241                  */
12242                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12243                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12244                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12245                                        pci_state_reg);
12246         }
12247
12248         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12250             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12251                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12252
12253                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12254                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12255                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12256                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12257                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12258         }
12259
12260         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12261          * GPIO1 driven high will bring 5700's external PHY out of reset.
12262          * It is also used as eeprom write protect on LOMs.
12263          */
12264         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12265         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12266             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12267                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12268                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12269         /* Unused GPIO3 must be driven as output on 5752 because there
12270          * are no pull-up resistors on unused GPIO pins.
12271          */
12272         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12273                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12274
12275         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12276                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12277
12278         /* Force the chip into D0. */
12279         err = tg3_set_power_state(tp, PCI_D0);
12280         if (err) {
12281                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12282                        pci_name(tp->pdev));
12283                 return err;
12284         }
12285
12286         /* 5700 B0 chips do not support checksumming correctly due
12287          * to hardware bugs.
12288          */
12289         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12290                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12291
12292         /* Derive initial jumbo mode from MTU assigned in
12293          * ether_setup() via the alloc_etherdev() call
12294          */
12295         if (tp->dev->mtu > ETH_DATA_LEN &&
12296             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12297                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12298
12299         /* Determine WakeOnLan speed to use. */
12300         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12301             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12302             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12303             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12304                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12305         } else {
12306                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12307         }
12308
12309         /* A few boards don't want Ethernet@WireSpeed phy feature */
12310         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12311             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12312              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12313              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12314             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12315             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12316                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12317
12318         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12319             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12320                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12321         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12322                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12323
12324         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12325                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12326                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12327                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12328                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12329                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12330                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12331                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12332                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12333                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12334                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12335                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12336                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12337         }
12338
12339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12340             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12341                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12342                 if (tp->phy_otp == 0)
12343                         tp->phy_otp = TG3_OTP_DEFAULT;
12344         }
12345
12346         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12347                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12348         else
12349                 tp->mi_mode = MAC_MI_MODE_BASE;
12350
12351         tp->coalesce_mode = 0;
12352         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12353             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12354                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12355
12356         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12357                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12358
12359         err = tg3_mdio_init(tp);
12360         if (err)
12361                 return err;
12362
12363         /* Initialize data/descriptor byte/word swapping. */
12364         val = tr32(GRC_MODE);
12365         val &= GRC_MODE_HOST_STACKUP;
12366         tw32(GRC_MODE, val | tp->grc_mode);
12367
12368         tg3_switch_clocks(tp);
12369
12370         /* Clear this out for sanity. */
12371         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12372
12373         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12374                               &pci_state_reg);
12375         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12376             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12377                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12378
12379                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12380                     chiprevid == CHIPREV_ID_5701_B0 ||
12381                     chiprevid == CHIPREV_ID_5701_B2 ||
12382                     chiprevid == CHIPREV_ID_5701_B5) {
12383                         void __iomem *sram_base;
12384
12385                         /* Write some dummy words into the SRAM status block
12386                          * area, see if it reads back correctly.  If the return
12387                          * value is bad, force enable the PCIX workaround.
12388                          */
12389                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12390
12391                         writel(0x00000000, sram_base);
12392                         writel(0x00000000, sram_base + 4);
12393                         writel(0xffffffff, sram_base + 4);
12394                         if (readl(sram_base) != 0x00000000)
12395                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12396                 }
12397         }
12398
12399         udelay(50);
12400         tg3_nvram_init(tp);
12401
12402         grc_misc_cfg = tr32(GRC_MISC_CFG);
12403         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12404
12405         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12406             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12407              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12408                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12409
12410         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12411             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12412                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12413         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12414                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12415                                       HOSTCC_MODE_CLRTICK_TXBD);
12416
12417                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12418                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12419                                        tp->misc_host_ctrl);
12420         }
12421
12422         /* these are limited to 10/100 only */
12423         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12424              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12425             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12426              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12427              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12428               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12429               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12430             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12431              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12432               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12433               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12434             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12435                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12436
12437         err = tg3_phy_probe(tp);
12438         if (err) {
12439                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12440                        pci_name(tp->pdev), err);
12441                 /* ... but do not return immediately ... */
12442                 tg3_mdio_fini(tp);
12443         }
12444
12445         tg3_read_partno(tp);
12446         tg3_read_fw_ver(tp);
12447
12448         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12449                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12450         } else {
12451                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12452                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12453                 else
12454                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12455         }
12456
12457         /* 5700 {AX,BX} chips have a broken status block link
12458          * change bit implementation, so we must use the
12459          * status register in those cases.
12460          */
12461         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12462                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12463         else
12464                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12465
12466         /* The led_ctrl is set during tg3_phy_probe, here we might
12467          * have to force the link status polling mechanism based
12468          * upon subsystem IDs.
12469          */
12470         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12472             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12473                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12474                                   TG3_FLAG_USE_LINKCHG_REG);
12475         }
12476
12477         /* For all SERDES we poll the MAC status register. */
12478         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12479                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12480         else
12481                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12482
12483         /* All chips before 5787 can get confused if TX buffers
12484          * straddle the 4GB address boundary in some cases.
12485          */
12486         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12487             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12488             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12489             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12490             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12492                 tp->dev->hard_start_xmit = tg3_start_xmit;
12493         else
12494                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12495
12496         tp->rx_offset = 2;
12497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12498             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12499                 tp->rx_offset = 0;
12500
12501         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12502
12503         /* Increment the rx prod index on the rx std ring by at most
12504          * 8 for these chips to workaround hw errata.
12505          */
12506         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12507             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12508             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12509                 tp->rx_std_max_post = 8;
12510
12511         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12512                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12513                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12514
12515         return err;
12516 }
12517
12518 #ifdef CONFIG_SPARC
12519 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12520 {
12521         struct net_device *dev = tp->dev;
12522         struct pci_dev *pdev = tp->pdev;
12523         struct device_node *dp = pci_device_to_OF_node(pdev);
12524         const unsigned char *addr;
12525         int len;
12526
12527         addr = of_get_property(dp, "local-mac-address", &len);
12528         if (addr && len == 6) {
12529                 memcpy(dev->dev_addr, addr, 6);
12530                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12531                 return 0;
12532         }
12533         return -ENODEV;
12534 }
12535
12536 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12537 {
12538         struct net_device *dev = tp->dev;
12539
12540         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12541         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12542         return 0;
12543 }
12544 #endif
12545
12546 static int __devinit tg3_get_device_address(struct tg3 *tp)
12547 {
12548         struct net_device *dev = tp->dev;
12549         u32 hi, lo, mac_offset;
12550         int addr_ok = 0;
12551
12552 #ifdef CONFIG_SPARC
12553         if (!tg3_get_macaddr_sparc(tp))
12554                 return 0;
12555 #endif
12556
12557         mac_offset = 0x7c;
12558         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12559             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12560                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12561                         mac_offset = 0xcc;
12562                 if (tg3_nvram_lock(tp))
12563                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12564                 else
12565                         tg3_nvram_unlock(tp);
12566         }
12567         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12568                 mac_offset = 0x10;
12569
12570         /* First try to get it from MAC address mailbox. */
12571         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12572         if ((hi >> 16) == 0x484b) {
12573                 dev->dev_addr[0] = (hi >>  8) & 0xff;
12574                 dev->dev_addr[1] = (hi >>  0) & 0xff;
12575
12576                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12577                 dev->dev_addr[2] = (lo >> 24) & 0xff;
12578                 dev->dev_addr[3] = (lo >> 16) & 0xff;
12579                 dev->dev_addr[4] = (lo >>  8) & 0xff;
12580                 dev->dev_addr[5] = (lo >>  0) & 0xff;
12581
12582                 /* Some old bootcode may report a 0 MAC address in SRAM */
12583                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12584         }
12585         if (!addr_ok) {
12586                 /* Next, try NVRAM. */
12587                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12588                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12589                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
12590                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
12591                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
12592                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
12593                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
12594                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
12595                 }
12596                 /* Finally just fetch it out of the MAC control regs. */
12597                 else {
12598                         hi = tr32(MAC_ADDR_0_HIGH);
12599                         lo = tr32(MAC_ADDR_0_LOW);
12600
12601                         dev->dev_addr[5] = lo & 0xff;
12602                         dev->dev_addr[4] = (lo >> 8) & 0xff;
12603                         dev->dev_addr[3] = (lo >> 16) & 0xff;
12604                         dev->dev_addr[2] = (lo >> 24) & 0xff;
12605                         dev->dev_addr[1] = hi & 0xff;
12606                         dev->dev_addr[0] = (hi >> 8) & 0xff;
12607                 }
12608         }
12609
12610         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12611 #ifdef CONFIG_SPARC
12612                 if (!tg3_get_default_macaddr_sparc(tp))
12613                         return 0;
12614 #endif
12615                 return -EINVAL;
12616         }
12617         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12618         return 0;
12619 }
12620
12621 #define BOUNDARY_SINGLE_CACHELINE       1
12622 #define BOUNDARY_MULTI_CACHELINE        2
12623
12624 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12625 {
12626         int cacheline_size;
12627         u8 byte;
12628         int goal;
12629
12630         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12631         if (byte == 0)
12632                 cacheline_size = 1024;
12633         else
12634                 cacheline_size = (int) byte * 4;
12635
12636         /* On 5703 and later chips, the boundary bits have no
12637          * effect.
12638          */
12639         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12640             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12641             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12642                 goto out;
12643
12644 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12645         goal = BOUNDARY_MULTI_CACHELINE;
12646 #else
12647 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12648         goal = BOUNDARY_SINGLE_CACHELINE;
12649 #else
12650         goal = 0;
12651 #endif
12652 #endif
12653
12654         if (!goal)
12655                 goto out;
12656
12657         /* PCI controllers on most RISC systems tend to disconnect
12658          * when a device tries to burst across a cache-line boundary.
12659          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12660          *
12661          * Unfortunately, for PCI-E there are only limited
12662          * write-side controls for this, and thus for reads
12663          * we will still get the disconnects.  We'll also waste
12664          * these PCI cycles for both read and write for chips
12665          * other than 5700 and 5701 which do not implement the
12666          * boundary bits.
12667          */
12668         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12669             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12670                 switch (cacheline_size) {
12671                 case 16:
12672                 case 32:
12673                 case 64:
12674                 case 128:
12675                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12676                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12677                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12678                         } else {
12679                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12680                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12681                         }
12682                         break;
12683
12684                 case 256:
12685                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12686                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12687                         break;
12688
12689                 default:
12690                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12691                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12692                         break;
12693                 }
12694         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12695                 switch (cacheline_size) {
12696                 case 16:
12697                 case 32:
12698                 case 64:
12699                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12700                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12701                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12702                                 break;
12703                         }
12704                         /* fallthrough */
12705                 case 128:
12706                 default:
12707                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12708                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12709                         break;
12710                 }
12711         } else {
12712                 switch (cacheline_size) {
12713                 case 16:
12714                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12715                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12716                                         DMA_RWCTRL_WRITE_BNDRY_16);
12717                                 break;
12718                         }
12719                         /* fallthrough */
12720                 case 32:
12721                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12722                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12723                                         DMA_RWCTRL_WRITE_BNDRY_32);
12724                                 break;
12725                         }
12726                         /* fallthrough */
12727                 case 64:
12728                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12729                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12730                                         DMA_RWCTRL_WRITE_BNDRY_64);
12731                                 break;
12732                         }
12733                         /* fallthrough */
12734                 case 128:
12735                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12736                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12737                                         DMA_RWCTRL_WRITE_BNDRY_128);
12738                                 break;
12739                         }
12740                         /* fallthrough */
12741                 case 256:
12742                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12743                                 DMA_RWCTRL_WRITE_BNDRY_256);
12744                         break;
12745                 case 512:
12746                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12747                                 DMA_RWCTRL_WRITE_BNDRY_512);
12748                         break;
12749                 case 1024:
12750                 default:
12751                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12752                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12753                         break;
12754                 }
12755         }
12756
12757 out:
12758         return val;
12759 }
12760
12761 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12762 {
12763         struct tg3_internal_buffer_desc test_desc;
12764         u32 sram_dma_descs;
12765         int i, ret;
12766
12767         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12768
12769         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12770         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12771         tw32(RDMAC_STATUS, 0);
12772         tw32(WDMAC_STATUS, 0);
12773
12774         tw32(BUFMGR_MODE, 0);
12775         tw32(FTQ_RESET, 0);
12776
12777         test_desc.addr_hi = ((u64) buf_dma) >> 32;
12778         test_desc.addr_lo = buf_dma & 0xffffffff;
12779         test_desc.nic_mbuf = 0x00002100;
12780         test_desc.len = size;
12781
12782         /*
12783          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12784          * the *second* time the tg3 driver was getting loaded after an
12785          * initial scan.
12786          *
12787          * Broadcom tells me:
12788          *   ...the DMA engine is connected to the GRC block and a DMA
12789          *   reset may affect the GRC block in some unpredictable way...
12790          *   The behavior of resets to individual blocks has not been tested.
12791          *
12792          * Broadcom noted the GRC reset will also reset all sub-components.
12793          */
12794         if (to_device) {
12795                 test_desc.cqid_sqid = (13 << 8) | 2;
12796
12797                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12798                 udelay(40);
12799         } else {
12800                 test_desc.cqid_sqid = (16 << 8) | 7;
12801
12802                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12803                 udelay(40);
12804         }
12805         test_desc.flags = 0x00000005;
12806
12807         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12808                 u32 val;
12809
12810                 val = *(((u32 *)&test_desc) + i);
12811                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12812                                        sram_dma_descs + (i * sizeof(u32)));
12813                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12814         }
12815         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12816
12817         if (to_device) {
12818                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12819         } else {
12820                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12821         }
12822
12823         ret = -ENODEV;
12824         for (i = 0; i < 40; i++) {
12825                 u32 val;
12826
12827                 if (to_device)
12828                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12829                 else
12830                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12831                 if ((val & 0xffff) == sram_dma_descs) {
12832                         ret = 0;
12833                         break;
12834                 }
12835
12836                 udelay(100);
12837         }
12838
12839         return ret;
12840 }
12841
12842 #define TEST_BUFFER_SIZE        0x2000
12843
12844 static int __devinit tg3_test_dma(struct tg3 *tp)
12845 {
12846         dma_addr_t buf_dma;
12847         u32 *buf, saved_dma_rwctrl;
12848         int ret;
12849
12850         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12851         if (!buf) {
12852                 ret = -ENOMEM;
12853                 goto out_nofree;
12854         }
12855
12856         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12857                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12858
12859         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12860
12861         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12862                 /* DMA read watermark not used on PCIE */
12863                 tp->dma_rwctrl |= 0x00180000;
12864         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12865                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12866                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12867                         tp->dma_rwctrl |= 0x003f0000;
12868                 else
12869                         tp->dma_rwctrl |= 0x003f000f;
12870         } else {
12871                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12872                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12873                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12874                         u32 read_water = 0x7;
12875
12876                         /* If the 5704 is behind the EPB bridge, we can
12877                          * do the less restrictive ONE_DMA workaround for
12878                          * better performance.
12879                          */
12880                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12881                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12882                                 tp->dma_rwctrl |= 0x8000;
12883                         else if (ccval == 0x6 || ccval == 0x7)
12884                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12885
12886                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12887                                 read_water = 4;
12888                         /* Set bit 23 to enable PCIX hw bug fix */
12889                         tp->dma_rwctrl |=
12890                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12891                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12892                                 (1 << 23);
12893                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12894                         /* 5780 always in PCIX mode */
12895                         tp->dma_rwctrl |= 0x00144000;
12896                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12897                         /* 5714 always in PCIX mode */
12898                         tp->dma_rwctrl |= 0x00148000;
12899                 } else {
12900                         tp->dma_rwctrl |= 0x001b000f;
12901                 }
12902         }
12903
12904         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12905             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12906                 tp->dma_rwctrl &= 0xfffffff0;
12907
12908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12910                 /* Remove this if it causes problems for some boards. */
12911                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12912
12913                 /* On 5700/5701 chips, we need to set this bit.
12914                  * Otherwise the chip will issue cacheline transactions
12915                  * to streamable DMA memory with not all the byte
12916                  * enables turned on.  This is an error on several
12917                  * RISC PCI controllers, in particular sparc64.
12918                  *
12919                  * On 5703/5704 chips, this bit has been reassigned
12920                  * a different meaning.  In particular, it is used
12921                  * on those chips to enable a PCI-X workaround.
12922                  */
12923                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12924         }
12925
12926         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12927
12928 #if 0
12929         /* Unneeded, already done by tg3_get_invariants.  */
12930         tg3_switch_clocks(tp);
12931 #endif
12932
12933         ret = 0;
12934         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12935             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12936                 goto out;
12937
12938         /* It is best to perform DMA test with maximum write burst size
12939          * to expose the 5700/5701 write DMA bug.
12940          */
12941         saved_dma_rwctrl = tp->dma_rwctrl;
12942         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12943         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12944
12945         while (1) {
12946                 u32 *p = buf, i;
12947
12948                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12949                         p[i] = i;
12950
12951                 /* Send the buffer to the chip. */
12952                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12953                 if (ret) {
12954                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12955                         break;
12956                 }
12957
12958 #if 0
12959                 /* validate data reached card RAM correctly. */
12960                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12961                         u32 val;
12962                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12963                         if (le32_to_cpu(val) != p[i]) {
12964                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12965                                 /* ret = -ENODEV here? */
12966                         }
12967                         p[i] = 0;
12968                 }
12969 #endif
12970                 /* Now read it back. */
12971                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12972                 if (ret) {
12973                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12974
12975                         break;
12976                 }
12977
12978                 /* Verify it. */
12979                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12980                         if (p[i] == i)
12981                                 continue;
12982
12983                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12984                             DMA_RWCTRL_WRITE_BNDRY_16) {
12985                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12986                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12987                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12988                                 break;
12989                         } else {
12990                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12991                                 ret = -ENODEV;
12992                                 goto out;
12993                         }
12994                 }
12995
12996                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12997                         /* Success. */
12998                         ret = 0;
12999                         break;
13000                 }
13001         }
13002         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13003             DMA_RWCTRL_WRITE_BNDRY_16) {
13004                 static struct pci_device_id dma_wait_state_chipsets[] = {
13005                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13006                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13007                         { },
13008                 };
13009
13010                 /* DMA test passed without adjusting DMA boundary,
13011                  * now look for chipsets that are known to expose the
13012                  * DMA bug without failing the test.
13013                  */
13014                 if (pci_dev_present(dma_wait_state_chipsets)) {
13015                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13016                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13017                 }
13018                 else
13019                         /* Safe to use the calculated DMA boundary. */
13020                         tp->dma_rwctrl = saved_dma_rwctrl;
13021
13022                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13023         }
13024
13025 out:
13026         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13027 out_nofree:
13028         return ret;
13029 }
13030
13031 static void __devinit tg3_init_link_config(struct tg3 *tp)
13032 {
13033         tp->link_config.advertising =
13034                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13035                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13036                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13037                  ADVERTISED_Autoneg | ADVERTISED_MII);
13038         tp->link_config.speed = SPEED_INVALID;
13039         tp->link_config.duplex = DUPLEX_INVALID;
13040         tp->link_config.autoneg = AUTONEG_ENABLE;
13041         tp->link_config.active_speed = SPEED_INVALID;
13042         tp->link_config.active_duplex = DUPLEX_INVALID;
13043         tp->link_config.phy_is_low_power = 0;
13044         tp->link_config.orig_speed = SPEED_INVALID;
13045         tp->link_config.orig_duplex = DUPLEX_INVALID;
13046         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13047 }
13048
13049 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13050 {
13051         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13052                 tp->bufmgr_config.mbuf_read_dma_low_water =
13053                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13054                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13055                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13056                 tp->bufmgr_config.mbuf_high_water =
13057                         DEFAULT_MB_HIGH_WATER_5705;
13058                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13059                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13060                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13061                         tp->bufmgr_config.mbuf_high_water =
13062                                 DEFAULT_MB_HIGH_WATER_5906;
13063                 }
13064
13065                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13066                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13067                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13068                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13069                 tp->bufmgr_config.mbuf_high_water_jumbo =
13070                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13071         } else {
13072                 tp->bufmgr_config.mbuf_read_dma_low_water =
13073                         DEFAULT_MB_RDMA_LOW_WATER;
13074                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13075                         DEFAULT_MB_MACRX_LOW_WATER;
13076                 tp->bufmgr_config.mbuf_high_water =
13077                         DEFAULT_MB_HIGH_WATER;
13078
13079                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13080                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13081                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13082                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13083                 tp->bufmgr_config.mbuf_high_water_jumbo =
13084                         DEFAULT_MB_HIGH_WATER_JUMBO;
13085         }
13086
13087         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13088         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13089 }
13090
13091 static char * __devinit tg3_phy_string(struct tg3 *tp)
13092 {
13093         switch (tp->phy_id & PHY_ID_MASK) {
13094         case PHY_ID_BCM5400:    return "5400";
13095         case PHY_ID_BCM5401:    return "5401";
13096         case PHY_ID_BCM5411:    return "5411";
13097         case PHY_ID_BCM5701:    return "5701";
13098         case PHY_ID_BCM5703:    return "5703";
13099         case PHY_ID_BCM5704:    return "5704";
13100         case PHY_ID_BCM5705:    return "5705";
13101         case PHY_ID_BCM5750:    return "5750";
13102         case PHY_ID_BCM5752:    return "5752";
13103         case PHY_ID_BCM5714:    return "5714";
13104         case PHY_ID_BCM5780:    return "5780";
13105         case PHY_ID_BCM5755:    return "5755";
13106         case PHY_ID_BCM5787:    return "5787";
13107         case PHY_ID_BCM5784:    return "5784";
13108         case PHY_ID_BCM5756:    return "5722/5756";
13109         case PHY_ID_BCM5906:    return "5906";
13110         case PHY_ID_BCM5761:    return "5761";
13111         case PHY_ID_BCM8002:    return "8002/serdes";
13112         case 0:                 return "serdes";
13113         default:                return "unknown";
13114         }
13115 }
13116
13117 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13118 {
13119         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13120                 strcpy(str, "PCI Express");
13121                 return str;
13122         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13123                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13124
13125                 strcpy(str, "PCIX:");
13126
13127                 if ((clock_ctrl == 7) ||
13128                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13129                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13130                         strcat(str, "133MHz");
13131                 else if (clock_ctrl == 0)
13132                         strcat(str, "33MHz");
13133                 else if (clock_ctrl == 2)
13134                         strcat(str, "50MHz");
13135                 else if (clock_ctrl == 4)
13136                         strcat(str, "66MHz");
13137                 else if (clock_ctrl == 6)
13138                         strcat(str, "100MHz");
13139         } else {
13140                 strcpy(str, "PCI:");
13141                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13142                         strcat(str, "66MHz");
13143                 else
13144                         strcat(str, "33MHz");
13145         }
13146         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13147                 strcat(str, ":32-bit");
13148         else
13149                 strcat(str, ":64-bit");
13150         return str;
13151 }
13152
13153 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13154 {
13155         struct pci_dev *peer;
13156         unsigned int func, devnr = tp->pdev->devfn & ~7;
13157
13158         for (func = 0; func < 8; func++) {
13159                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13160                 if (peer && peer != tp->pdev)
13161                         break;
13162                 pci_dev_put(peer);
13163         }
13164         /* 5704 can be configured in single-port mode, set peer to
13165          * tp->pdev in that case.
13166          */
13167         if (!peer) {
13168                 peer = tp->pdev;
13169                 return peer;
13170         }
13171
13172         /*
13173          * We don't need to keep the refcount elevated; there's no way
13174          * to remove one half of this device without removing the other
13175          */
13176         pci_dev_put(peer);
13177
13178         return peer;
13179 }
13180
13181 static void __devinit tg3_init_coal(struct tg3 *tp)
13182 {
13183         struct ethtool_coalesce *ec = &tp->coal;
13184
13185         memset(ec, 0, sizeof(*ec));
13186         ec->cmd = ETHTOOL_GCOALESCE;
13187         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13188         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13189         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13190         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13191         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13192         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13193         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13194         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13195         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13196
13197         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13198                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13199                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13200                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13201                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13202                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13203         }
13204
13205         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13206                 ec->rx_coalesce_usecs_irq = 0;
13207                 ec->tx_coalesce_usecs_irq = 0;
13208                 ec->stats_block_coalesce_usecs = 0;
13209         }
13210 }
13211
13212 static int __devinit tg3_init_one(struct pci_dev *pdev,
13213                                   const struct pci_device_id *ent)
13214 {
13215         static int tg3_version_printed = 0;
13216         resource_size_t tg3reg_base;
13217         unsigned long tg3reg_len;
13218         struct net_device *dev;
13219         struct tg3 *tp;
13220         int err, pm_cap;
13221         char str[40];
13222         u64 dma_mask, persist_dma_mask;
13223         DECLARE_MAC_BUF(mac);
13224
13225         if (tg3_version_printed++ == 0)
13226                 printk(KERN_INFO "%s", version);
13227
13228         err = pci_enable_device(pdev);
13229         if (err) {
13230                 printk(KERN_ERR PFX "Cannot enable PCI device, "
13231                        "aborting.\n");
13232                 return err;
13233         }
13234
13235         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13236                 printk(KERN_ERR PFX "Cannot find proper PCI device "
13237                        "base address, aborting.\n");
13238                 err = -ENODEV;
13239                 goto err_out_disable_pdev;
13240         }
13241
13242         err = pci_request_regions(pdev, DRV_MODULE_NAME);
13243         if (err) {
13244                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13245                        "aborting.\n");
13246                 goto err_out_disable_pdev;
13247         }
13248
13249         pci_set_master(pdev);
13250
13251         /* Find power-management capability. */
13252         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13253         if (pm_cap == 0) {
13254                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13255                        "aborting.\n");
13256                 err = -EIO;
13257                 goto err_out_free_res;
13258         }
13259
13260         tg3reg_base = pci_resource_start(pdev, 0);
13261         tg3reg_len = pci_resource_len(pdev, 0);
13262
13263         dev = alloc_etherdev(sizeof(*tp));
13264         if (!dev) {
13265                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13266                 err = -ENOMEM;
13267                 goto err_out_free_res;
13268         }
13269
13270         SET_NETDEV_DEV(dev, &pdev->dev);
13271
13272 #if TG3_VLAN_TAG_USED
13273         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13274         dev->vlan_rx_register = tg3_vlan_rx_register;
13275 #endif
13276
13277         tp = netdev_priv(dev);
13278         tp->pdev = pdev;
13279         tp->dev = dev;
13280         tp->pm_cap = pm_cap;
13281         tp->mac_mode = TG3_DEF_MAC_MODE;
13282         tp->rx_mode = TG3_DEF_RX_MODE;
13283         tp->tx_mode = TG3_DEF_TX_MODE;
13284
13285         if (tg3_debug > 0)
13286                 tp->msg_enable = tg3_debug;
13287         else
13288                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13289
13290         /* The word/byte swap controls here control register access byte
13291          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13292          * setting below.
13293          */
13294         tp->misc_host_ctrl =
13295                 MISC_HOST_CTRL_MASK_PCI_INT |
13296                 MISC_HOST_CTRL_WORD_SWAP |
13297                 MISC_HOST_CTRL_INDIR_ACCESS |
13298                 MISC_HOST_CTRL_PCISTATE_RW;
13299
13300         /* The NONFRM (non-frame) byte/word swap controls take effect
13301          * on descriptor entries, anything which isn't packet data.
13302          *
13303          * The StrongARM chips on the board (one for tx, one for rx)
13304          * are running in big-endian mode.
13305          */
13306         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13307                         GRC_MODE_WSWAP_NONFRM_DATA);
13308 #ifdef __BIG_ENDIAN
13309         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13310 #endif
13311         spin_lock_init(&tp->lock);
13312         spin_lock_init(&tp->indirect_lock);
13313         INIT_WORK(&tp->reset_task, tg3_reset_task);
13314
13315         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
13316         if (!tp->regs) {
13317                 printk(KERN_ERR PFX "Cannot map device registers, "
13318                        "aborting.\n");
13319                 err = -ENOMEM;
13320                 goto err_out_free_dev;
13321         }
13322
13323         tg3_init_link_config(tp);
13324
13325         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13326         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13327         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13328
13329         dev->open = tg3_open;
13330         dev->stop = tg3_close;
13331         dev->get_stats = tg3_get_stats;
13332         dev->set_multicast_list = tg3_set_rx_mode;
13333         dev->set_mac_address = tg3_set_mac_addr;
13334         dev->do_ioctl = tg3_ioctl;
13335         dev->tx_timeout = tg3_tx_timeout;
13336         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13337         dev->ethtool_ops = &tg3_ethtool_ops;
13338         dev->watchdog_timeo = TG3_TX_TIMEOUT;
13339         dev->change_mtu = tg3_change_mtu;
13340         dev->irq = pdev->irq;
13341 #ifdef CONFIG_NET_POLL_CONTROLLER
13342         dev->poll_controller = tg3_poll_controller;
13343 #endif
13344
13345         err = tg3_get_invariants(tp);
13346         if (err) {
13347                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13348                        "aborting.\n");
13349                 goto err_out_iounmap;
13350         }
13351
13352         /* The EPB bridge inside 5714, 5715, and 5780 and any
13353          * device behind the EPB cannot support DMA addresses > 40-bit.
13354          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13355          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13356          * do DMA address check in tg3_start_xmit().
13357          */
13358         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13359                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13360         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13361                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13362 #ifdef CONFIG_HIGHMEM
13363                 dma_mask = DMA_64BIT_MASK;
13364 #endif
13365         } else
13366                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13367
13368         /* Configure DMA attributes. */
13369         if (dma_mask > DMA_32BIT_MASK) {
13370                 err = pci_set_dma_mask(pdev, dma_mask);
13371                 if (!err) {
13372                         dev->features |= NETIF_F_HIGHDMA;
13373                         err = pci_set_consistent_dma_mask(pdev,
13374                                                           persist_dma_mask);
13375                         if (err < 0) {
13376                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13377                                        "DMA for consistent allocations\n");
13378                                 goto err_out_iounmap;
13379                         }
13380                 }
13381         }
13382         if (err || dma_mask == DMA_32BIT_MASK) {
13383                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13384                 if (err) {
13385                         printk(KERN_ERR PFX "No usable DMA configuration, "
13386                                "aborting.\n");
13387                         goto err_out_iounmap;
13388                 }
13389         }
13390
13391         tg3_init_bufmgr_config(tp);
13392
13393         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13394                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13395         }
13396         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13397             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13398             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13399             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13400             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13401                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13402         } else {
13403                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13404         }
13405
13406         /* TSO is on by default on chips that support hardware TSO.
13407          * Firmware TSO on older chips gives lower performance, so it
13408          * is off by default, but can be enabled using ethtool.
13409          */
13410         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13411                 dev->features |= NETIF_F_TSO;
13412                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13413                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13414                         dev->features |= NETIF_F_TSO6;
13415                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13416                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13417                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13418                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13419                         dev->features |= NETIF_F_TSO_ECN;
13420         }
13421
13422
13423         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13424             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13425             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13426                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13427                 tp->rx_pending = 63;
13428         }
13429
13430         err = tg3_get_device_address(tp);
13431         if (err) {
13432                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13433                        "aborting.\n");
13434                 goto err_out_iounmap;
13435         }
13436
13437         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13438                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13439                         printk(KERN_ERR PFX "Cannot find proper PCI device "
13440                                "base address for APE, aborting.\n");
13441                         err = -ENODEV;
13442                         goto err_out_iounmap;
13443                 }
13444
13445                 tg3reg_base = pci_resource_start(pdev, 2);
13446                 tg3reg_len = pci_resource_len(pdev, 2);
13447
13448                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
13449                 if (!tp->aperegs) {
13450                         printk(KERN_ERR PFX "Cannot map APE registers, "
13451                                "aborting.\n");
13452                         err = -ENOMEM;
13453                         goto err_out_iounmap;
13454                 }
13455
13456                 tg3_ape_lock_init(tp);
13457         }
13458
13459         /*
13460          * Reset chip in case UNDI or EFI driver did not shutdown
13461          * DMA self test will enable WDMAC and we'll see (spurious)
13462          * pending DMA on the PCI bus at that point.
13463          */
13464         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13465             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13466                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13467                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13468         }
13469
13470         err = tg3_test_dma(tp);
13471         if (err) {
13472                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13473                 goto err_out_apeunmap;
13474         }
13475
13476         /* Tigon3 can do ipv4 only... and some chips have buggy
13477          * checksumming.
13478          */
13479         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13480                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13481                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13482                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13483                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13484                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13485                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13486                         dev->features |= NETIF_F_IPV6_CSUM;
13487
13488                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13489         } else
13490                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13491
13492         /* flow control autonegotiation is default behavior */
13493         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13494         tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13495
13496         tg3_init_coal(tp);
13497
13498         pci_set_drvdata(pdev, dev);
13499
13500         err = register_netdev(dev);
13501         if (err) {
13502                 printk(KERN_ERR PFX "Cannot register net device, "
13503                        "aborting.\n");
13504                 goto err_out_apeunmap;
13505         }
13506
13507         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
13508                "(%s) %s Ethernet %s\n",
13509                dev->name,
13510                tp->board_part_number,
13511                tp->pci_chip_rev_id,
13512                tg3_phy_string(tp),
13513                tg3_bus_string(tp, str),
13514                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13515                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13516                  "10/100/1000Base-T")),
13517                print_mac(mac, dev->dev_addr));
13518
13519         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
13520                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
13521                dev->name,
13522                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13523                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13524                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13525                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13526                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13527                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13528         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13529                dev->name, tp->dma_rwctrl,
13530                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13531                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13532
13533         return 0;
13534
13535 err_out_apeunmap:
13536         if (tp->aperegs) {
13537                 iounmap(tp->aperegs);
13538                 tp->aperegs = NULL;
13539         }
13540
13541 err_out_iounmap:
13542         if (tp->regs) {
13543                 iounmap(tp->regs);
13544                 tp->regs = NULL;
13545         }
13546
13547 err_out_free_dev:
13548         free_netdev(dev);
13549
13550 err_out_free_res:
13551         pci_release_regions(pdev);
13552
13553 err_out_disable_pdev:
13554         pci_disable_device(pdev);
13555         pci_set_drvdata(pdev, NULL);
13556         return err;
13557 }
13558
13559 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13560 {
13561         struct net_device *dev = pci_get_drvdata(pdev);
13562
13563         if (dev) {
13564                 struct tg3 *tp = netdev_priv(dev);
13565
13566                 flush_scheduled_work();
13567
13568                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13569                         tg3_phy_fini(tp);
13570                         tg3_mdio_fini(tp);
13571                 }
13572
13573                 unregister_netdev(dev);
13574                 if (tp->aperegs) {
13575                         iounmap(tp->aperegs);
13576                         tp->aperegs = NULL;
13577                 }
13578                 if (tp->regs) {
13579                         iounmap(tp->regs);
13580                         tp->regs = NULL;
13581                 }
13582                 free_netdev(dev);
13583                 pci_release_regions(pdev);
13584                 pci_disable_device(pdev);
13585                 pci_set_drvdata(pdev, NULL);
13586         }
13587 }
13588
13589 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13590 {
13591         struct net_device *dev = pci_get_drvdata(pdev);
13592         struct tg3 *tp = netdev_priv(dev);
13593         int err;
13594
13595         /* PCI register 4 needs to be saved whether netif_running() or not.
13596          * MSI address and data need to be saved if using MSI and
13597          * netif_running().
13598          */
13599         pci_save_state(pdev);
13600
13601         if (!netif_running(dev))
13602                 return 0;
13603
13604         flush_scheduled_work();
13605         tg3_phy_stop(tp);
13606         tg3_netif_stop(tp);
13607
13608         del_timer_sync(&tp->timer);
13609
13610         tg3_full_lock(tp, 1);
13611         tg3_disable_ints(tp);
13612         tg3_full_unlock(tp);
13613
13614         netif_device_detach(dev);
13615
13616         tg3_full_lock(tp, 0);
13617         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13618         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13619         tg3_full_unlock(tp);
13620
13621         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13622         if (err) {
13623                 int err2;
13624
13625                 tg3_full_lock(tp, 0);
13626
13627                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13628                 err2 = tg3_restart_hw(tp, 1);
13629                 if (err2)
13630                         goto out;
13631
13632                 tp->timer.expires = jiffies + tp->timer_offset;
13633                 add_timer(&tp->timer);
13634
13635                 netif_device_attach(dev);
13636                 tg3_netif_start(tp);
13637
13638 out:
13639                 tg3_full_unlock(tp);
13640
13641                 if (!err2)
13642                         tg3_phy_start(tp);
13643         }
13644
13645         return err;
13646 }
13647
13648 static int tg3_resume(struct pci_dev *pdev)
13649 {
13650         struct net_device *dev = pci_get_drvdata(pdev);
13651         struct tg3 *tp = netdev_priv(dev);
13652         int err;
13653
13654         pci_restore_state(tp->pdev);
13655
13656         if (!netif_running(dev))
13657                 return 0;
13658
13659         err = tg3_set_power_state(tp, PCI_D0);
13660         if (err)
13661                 return err;
13662
13663         netif_device_attach(dev);
13664
13665         tg3_full_lock(tp, 0);
13666
13667         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13668         err = tg3_restart_hw(tp, 1);
13669         if (err)
13670                 goto out;
13671
13672         tp->timer.expires = jiffies + tp->timer_offset;
13673         add_timer(&tp->timer);
13674
13675         tg3_netif_start(tp);
13676
13677 out:
13678         tg3_full_unlock(tp);
13679
13680         if (!err)
13681                 tg3_phy_start(tp);
13682
13683         return err;
13684 }
13685
13686 static struct pci_driver tg3_driver = {
13687         .name           = DRV_MODULE_NAME,
13688         .id_table       = tg3_pci_tbl,
13689         .probe          = tg3_init_one,
13690         .remove         = __devexit_p(tg3_remove_one),
13691         .suspend        = tg3_suspend,
13692         .resume         = tg3_resume
13693 };
13694
13695 static int __init tg3_init(void)
13696 {
13697         return pci_register_driver(&tg3_driver);
13698 }
13699
13700 static void __exit tg3_cleanup(void)
13701 {
13702         pci_unregister_driver(&tg3_driver);
13703 }
13704
13705 module_init(tg3_init);
13706 module_exit(tg3_cleanup);