1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.65"
72 #define DRV_MODULE_RELDATE      "August 07, 2006"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
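/* Example of the mask trick mentioned above: TG3_TX_RING_SIZE is 512, a
 * power of two, so
 *
 *     NEXT_TX(511) == (511 + 1) & 511 == 0
 *
 * i.e. the AND wraps the index exactly like '% TG3_TX_RING_SIZE' would,
 * but without a hardware divide.
 */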
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
133
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
136
137 #define TG3_NUM_TEST            6
138
139 static char version[] __devinitdata =
140         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141
142 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
143 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
144 MODULE_LICENSE("GPL");
145 MODULE_VERSION(DRV_MODULE_VERSION);
146
147 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
148 module_param(tg3_debug, int, 0);
149 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
151 static struct pci_device_id tg3_pci_tbl[] = {
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
204         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
205         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
206         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
207         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
210         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
211         {}
212 };
213
214 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
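/* The all-zero entry terminates tg3_pci_tbl, and MODULE_DEVICE_TABLE()
 * exports the table in the module image so that userspace hotplug tools
 * can load tg3 automatically when a matching PCI vendor/device ID shows
 * up.
 */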
215
216 static const struct {
217         const char string[ETH_GSTRING_LEN];
218 } ethtool_stats_keys[TG3_NUM_STATS] = {
219         { "rx_octets" },
220         { "rx_fragments" },
221         { "rx_ucast_packets" },
222         { "rx_mcast_packets" },
223         { "rx_bcast_packets" },
224         { "rx_fcs_errors" },
225         { "rx_align_errors" },
226         { "rx_xon_pause_rcvd" },
227         { "rx_xoff_pause_rcvd" },
228         { "rx_mac_ctrl_rcvd" },
229         { "rx_xoff_entered" },
230         { "rx_frame_too_long_errors" },
231         { "rx_jabbers" },
232         { "rx_undersize_packets" },
233         { "rx_in_length_errors" },
234         { "rx_out_length_errors" },
235         { "rx_64_or_less_octet_packets" },
236         { "rx_65_to_127_octet_packets" },
237         { "rx_128_to_255_octet_packets" },
238         { "rx_256_to_511_octet_packets" },
239         { "rx_512_to_1023_octet_packets" },
240         { "rx_1024_to_1522_octet_packets" },
241         { "rx_1523_to_2047_octet_packets" },
242         { "rx_2048_to_4095_octet_packets" },
243         { "rx_4096_to_8191_octet_packets" },
244         { "rx_8192_to_9022_octet_packets" },
245
246         { "tx_octets" },
247         { "tx_collisions" },
248
249         { "tx_xon_sent" },
250         { "tx_xoff_sent" },
251         { "tx_flow_control" },
252         { "tx_mac_errors" },
253         { "tx_single_collisions" },
254         { "tx_mult_collisions" },
255         { "tx_deferred" },
256         { "tx_excessive_collisions" },
257         { "tx_late_collisions" },
258         { "tx_collide_2times" },
259         { "tx_collide_3times" },
260         { "tx_collide_4times" },
261         { "tx_collide_5times" },
262         { "tx_collide_6times" },
263         { "tx_collide_7times" },
264         { "tx_collide_8times" },
265         { "tx_collide_9times" },
266         { "tx_collide_10times" },
267         { "tx_collide_11times" },
268         { "tx_collide_12times" },
269         { "tx_collide_13times" },
270         { "tx_collide_14times" },
271         { "tx_collide_15times" },
272         { "tx_ucast_packets" },
273         { "tx_mcast_packets" },
274         { "tx_bcast_packets" },
275         { "tx_carrier_sense_errors" },
276         { "tx_discards" },
277         { "tx_errors" },
278
279         { "dma_writeq_full" },
280         { "dma_write_prioq_full" },
281         { "rxbds_empty" },
282         { "rx_discards" },
283         { "rx_errors" },
284         { "rx_threshold_hit" },
285
286         { "dma_readq_full" },
287         { "dma_read_prioq_full" },
288         { "tx_comp_queue_full" },
289
290         { "ring_set_send_prod_index" },
291         { "ring_status_update" },
292         { "nic_irqs" },
293         { "nic_avoided_irqs" },
294         { "nic_tx_threshold_hit" }
295 };
296
297 static const struct {
298         const char string[ETH_GSTRING_LEN];
299 } ethtool_test_keys[TG3_NUM_TEST] = {
300         { "nvram test     (online) " },
301         { "link test      (online) " },
302         { "register test  (offline)" },
303         { "memory test    (offline)" },
304         { "loopback test  (offline)" },
305         { "interrupt test (offline)" },
306 };
307
308 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
309 {
310         writel(val, tp->regs + off);
311 }
312
313 static u32 tg3_read32(struct tg3 *tp, u32 off)
314 {
315         return (readl(tp->regs + off));
316 }
317
318 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
319 {
320         unsigned long flags;
321
322         spin_lock_irqsave(&tp->indirect_lock, flags);
323         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325         spin_unlock_irqrestore(&tp->indirect_lock, flags);
326 }
327
328 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
329 {
330         writel(val, tp->regs + off);
331         readl(tp->regs + off);
332 }
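/* tg3_write_flush_reg32() is the usual posted-write flush idiom: the
 * readl() of the same register forces the preceding writel() out of any
 * PCI posting buffers before the caller proceeds, at the cost of one
 * round trip to the device.
 */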
333
334 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
335 {
336         unsigned long flags;
337         u32 val;
338
339         spin_lock_irqsave(&tp->indirect_lock, flags);
340         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
341         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
342         spin_unlock_irqrestore(&tp->indirect_lock, flags);
343         return val;
344 }
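/* The two "indirect" helpers above avoid memory-mapped access entirely:
 * the target offset is written to the TG3PCI_REG_BASE_ADDR word in PCI
 * config space and the data then moves through TG3PCI_REG_DATA, with
 * indirect_lock keeping the address/data pair of config cycles atomic.
 * This path is presumably installed as tp->write32/tp->read32 at probe
 * time on chips where direct MMIO is unsafe (the same PCIX_TARGET_HWBUG
 * and ICH_WORKAROUND flags checked in _tw32_flush() below).
 */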
345
346 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
347 {
348         unsigned long flags;
349
350         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
351                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
352                                        TG3_64BIT_REG_LOW, val);
353                 return;
354         }
355         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
356                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
357                                        TG3_64BIT_REG_LOW, val);
358                 return;
359         }
360
361         spin_lock_irqsave(&tp->indirect_lock, flags);
362         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
363         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
364         spin_unlock_irqrestore(&tp->indirect_lock, flags);
365
366         /* In indirect mode when disabling interrupts, we also need
367          * to clear the interrupt bit in the GRC local ctrl register.
368          */
369         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
370             (val == 0x1)) {
371                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
372                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
373         }
374 }
375
376 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
377 {
378         unsigned long flags;
379         u32 val;
380
381         spin_lock_irqsave(&tp->indirect_lock, flags);
382         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
383         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
384         spin_unlock_irqrestore(&tp->indirect_lock, flags);
385         return val;
386 }
387
388 /* usec_wait specifies the wait time in usec when writing to certain registers
389  * where it is unsafe to read back the register without some delay.
390  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
391  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
392  */
393 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
394 {
395         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
396             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
397                 /* Non-posted methods */
398                 tp->write32(tp, off, val);
399         else {
400                 /* Posted method */
401                 tg3_write32(tp, off, val);
402                 if (usec_wait)
403                         udelay(usec_wait);
404                 tp->read32(tp, off);
405         }
406         /* Wait again after the read for the posted method to guarantee that
407          * the wait time is met.
408          */
409         if (usec_wait)
410                 udelay(usec_wait);
411 }
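/* Note the split in _tw32_flush(): on chips flagged with
 * PCIX_TARGET_HWBUG or the ICH workaround, tp->write32() is expected to
 * be one of the non-posted (indirect) methods, so no read-back is
 * needed; everything else does a direct MMIO write followed by a
 * read-back flush.  For the posted method, udelay() is done both before
 * and after the read-back so the requested wait time is met no matter
 * how long the read itself takes.
 */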
412
413 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
414 {
415         tp->write32_mbox(tp, off, val);
416         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
417             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
418                 tp->read32_mbox(tp, off);
419 }
420
421 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
422 {
423         void __iomem *mbox = tp->regs + off;
424         writel(val, mbox);
425         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
426                 writel(val, mbox);
427         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
428                 readl(mbox);
429 }
430
431 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
432 {
433         return (readl(tp->regs + off + GRCMBOX_BASE));
434 }
435
436 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
437 {
438         writel(val, tp->regs + off + GRCMBOX_BASE);
439 }
440
441 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
442 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
443 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
444 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
445 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
446
447 #define tw32(reg,val)           tp->write32(tp, reg, val)
448 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
449 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
450 #define tr32(reg)               tp->read32(tp, reg)
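/* The rest of the driver uses the tw32/tr32 and mailbox macros above for
 * register access.  They dispatch through per-device method pointers
 * (tp->write32, tp->read32, tp->write32_mbox, ...), so the appropriate
 * chip-specific workaround only has to be selected once at probe time
 * instead of being tested on every access.
 */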
451
452 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
453 {
454         unsigned long flags;
455
456         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
457             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
458                 return;
459
460         spin_lock_irqsave(&tp->indirect_lock, flags);
461         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
462                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
463                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
464
465                 /* Always leave this as zero. */
466                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
467         } else {
468                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
469                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
470
471                 /* Always leave this as zero. */
472                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
473         }
474         spin_unlock_irqrestore(&tp->indirect_lock, flags);
475 }
476
477 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
478 {
479         unsigned long flags;
480
481         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
482             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
483                 *val = 0;
484                 return;
485         }
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
489                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
490                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
491
492                 /* Always leave this as zero. */
493                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
494         } else {
495                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
496                 *val = tr32(TG3PCI_MEM_WIN_DATA);
497
498                 /* Always leave this as zero. */
499                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
500         }
501         spin_unlock_irqrestore(&tp->indirect_lock, flags);
502 }
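/* tg3_write_mem()/tg3_read_mem() reach the NIC's on-board SRAM through a
 * single 32-bit window: the offset goes into TG3PCI_MEM_WIN_BASE_ADDR
 * and the data moves through TG3PCI_MEM_WIN_DATA, either via PCI config
 * space (SRAM_USE_CONFIG) or via MMIO, with the window base always
 * restored to zero afterwards.  The statistics-block range is skipped up
 * front on the 5906, presumably because that SRAM range is not usable on
 * this chip.
 */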
503
504 static void tg3_disable_ints(struct tg3 *tp)
505 {
506         tw32(TG3PCI_MISC_HOST_CTRL,
507              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
508         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
509 }
510
511 static inline void tg3_cond_int(struct tg3 *tp)
512 {
513         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
514             (tp->hw_status->status & SD_STATUS_UPDATED))
515                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
516         else
517                 tw32(HOSTCC_MODE, tp->coalesce_mode |
518                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
519 }
520
521 static void tg3_enable_ints(struct tg3 *tp)
522 {
523         tp->irq_sync = 0;
524         wmb();
525
526         tw32(TG3PCI_MISC_HOST_CTRL,
527              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
528         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
529                        (tp->last_tag << 24));
530         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
531                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
532                                (tp->last_tag << 24));
533         tg3_cond_int(tp);
534 }
535
536 static inline unsigned int tg3_has_work(struct tg3 *tp)
537 {
538         struct tg3_hw_status *sblk = tp->hw_status;
539         unsigned int work_exists = 0;
540
541         /* check for phy events */
542         if (!(tp->tg3_flags &
543               (TG3_FLAG_USE_LINKCHG_REG |
544                TG3_FLAG_POLL_SERDES))) {
545                 if (sblk->status & SD_STATUS_LINK_CHG)
546                         work_exists = 1;
547         }
548         /* check for RX/TX work to do */
549         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
550             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
551                 work_exists = 1;
552
553         return work_exists;
554 }
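/* tg3_has_work() decides purely from the host-memory status block: a
 * pending link-change event (unless link changes are tracked through the
 * MAC register or serdes polling instead), or transmit/receive indices
 * that differ from the positions the driver has already processed.
 */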
555
556 /* tg3_restart_ints
557  *  similar to tg3_enable_ints, but it accurately determines whether there
558  *  is new work pending and can return without flushing the PIO write
559  *  which reenables interrupts
560  */
561 static void tg3_restart_ints(struct tg3 *tp)
562 {
563         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
564                      tp->last_tag << 24);
565         mmiowb();
566
567         /* When doing tagged status, this work check is unnecessary.
568          * The last_tag we write above tells the chip which piece of
569          * work we've completed.
570          */
571         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
572             tg3_has_work(tp))
573                 tw32(HOSTCC_MODE, tp->coalesce_mode |
574                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
575 }
576
577 static inline void tg3_netif_stop(struct tg3 *tp)
578 {
579         tp->dev->trans_start = jiffies; /* prevent tx timeout */
580         netif_poll_disable(tp->dev);
581         netif_tx_disable(tp->dev);
582 }
583
584 static inline void tg3_netif_start(struct tg3 *tp)
585 {
586         netif_wake_queue(tp->dev);
587         /* NOTE: unconditional netif_wake_queue is only appropriate
588          * so long as all callers are assured to have free tx slots
589          * (such as after tg3_init_hw)
590          */
591         netif_poll_enable(tp->dev);
592         tp->hw_status->status |= SD_STATUS_UPDATED;
593         tg3_enable_ints(tp);
594 }
595
596 static void tg3_switch_clocks(struct tg3 *tp)
597 {
598         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
599         u32 orig_clock_ctrl;
600
601         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
602                 return;
603
604         orig_clock_ctrl = clock_ctrl;
605         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
606                        CLOCK_CTRL_CLKRUN_OENABLE |
607                        0x1f);
608         tp->pci_clock_ctrl = clock_ctrl;
609
610         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
611                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
612                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
613                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
614                 }
615         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
616                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
617                             clock_ctrl |
618                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
619                             40);
620                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
621                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
622                             40);
623         }
624         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
625 }
626
627 #define PHY_BUSY_LOOPS  5000
628
629 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
630 {
631         u32 frame_val;
632         unsigned int loops;
633         int ret;
634
635         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
636                 tw32_f(MAC_MI_MODE,
637                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
638                 udelay(80);
639         }
640
641         *val = 0x0;
642
643         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
644                       MI_COM_PHY_ADDR_MASK);
645         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
646                       MI_COM_REG_ADDR_MASK);
647         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
648
649         tw32_f(MAC_MI_COM, frame_val);
650
651         loops = PHY_BUSY_LOOPS;
652         while (loops != 0) {
653                 udelay(10);
654                 frame_val = tr32(MAC_MI_COM);
655
656                 if ((frame_val & MI_COM_BUSY) == 0) {
657                         udelay(5);
658                         frame_val = tr32(MAC_MI_COM);
659                         break;
660                 }
661                 loops -= 1;
662         }
663
664         ret = -EBUSY;
665         if (loops != 0) {
666                 *val = frame_val & MI_COM_DATA_MASK;
667                 ret = 0;
668         }
669
670         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
671                 tw32_f(MAC_MI_MODE, tp->mi_mode);
672                 udelay(80);
673         }
674
675         return ret;
676 }
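/* MII management access pattern: the PHY address, register number and
 * command are packed into MAC_MI_COM, the write is flushed, and then
 * MI_COM_BUSY is polled (up to PHY_BUSY_LOOPS iterations of ~10 usec
 * each, roughly a 50 ms budget) until the frame completes.  Hardware
 * auto-polling is temporarily turned off around the transaction so it
 * cannot collide with the manual access.
 */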
677
678 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
679 {
680         u32 frame_val;
681         unsigned int loops;
682         int ret;
683
684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
685             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
686                 return 0;
687
688         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
689                 tw32_f(MAC_MI_MODE,
690                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
691                 udelay(80);
692         }
693
694         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
695                       MI_COM_PHY_ADDR_MASK);
696         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
697                       MI_COM_REG_ADDR_MASK);
698         frame_val |= (val & MI_COM_DATA_MASK);
699         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
700
701         tw32_f(MAC_MI_COM, frame_val);
702
703         loops = PHY_BUSY_LOOPS;
704         while (loops != 0) {
705                 udelay(10);
706                 frame_val = tr32(MAC_MI_COM);
707                 if ((frame_val & MI_COM_BUSY) == 0) {
708                         udelay(5);
709                         frame_val = tr32(MAC_MI_COM);
710                         break;
711                 }
712                 loops -= 1;
713         }
714
715         ret = -EBUSY;
716         if (loops != 0)
717                 ret = 0;
718
719         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
720                 tw32_f(MAC_MI_MODE, tp->mi_mode);
721                 udelay(80);
722         }
723
724         return ret;
725 }
726
727 static void tg3_phy_set_wirespeed(struct tg3 *tp)
728 {
729         u32 val;
730
731         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
732                 return;
733
734         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
735             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
736                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
737                              (val | (1 << 15) | (1 << 4)));
738 }
739
740 static int tg3_bmcr_reset(struct tg3 *tp)
741 {
742         u32 phy_control;
743         int limit, err;
744
745         /* OK, reset it, and poll the BMCR_RESET bit until it
746          * clears or we time out.
747          */
748         phy_control = BMCR_RESET;
749         err = tg3_writephy(tp, MII_BMCR, phy_control);
750         if (err != 0)
751                 return -EBUSY;
752
753         limit = 5000;
754         while (limit--) {
755                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
756                 if (err != 0)
757                         return -EBUSY;
758
759                 if ((phy_control & BMCR_RESET) == 0) {
760                         udelay(40);
761                         break;
762                 }
763                 udelay(10);
764         }
765         if (limit <= 0)
766                 return -EBUSY;
767
768         return 0;
769 }
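/* tg3_bmcr_reset() is the generic IEEE 802.3 soft reset: set BMCR_RESET
 * in the basic mode control register and poll until the PHY clears the
 * bit on its own or the ~50 ms budget above runs out.
 */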
770
771 static int tg3_wait_macro_done(struct tg3 *tp)
772 {
773         int limit = 100;
774
775         while (limit--) {
776                 u32 tmp32;
777
778                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
779                         if ((tmp32 & 0x1000) == 0)
780                                 break;
781                 }
782         }
783         if (limit <= 0)
784                 return -EBUSY;
785
786         return 0;
787 }
788
789 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
790 {
791         static const u32 test_pat[4][6] = {
792         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
793         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
794         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
795         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
796         };
797         int chan;
798
799         for (chan = 0; chan < 4; chan++) {
800                 int i;
801
802                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
803                              (chan * 0x2000) | 0x0200);
804                 tg3_writephy(tp, 0x16, 0x0002);
805
806                 for (i = 0; i < 6; i++)
807                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
808                                      test_pat[chan][i]);
809
810                 tg3_writephy(tp, 0x16, 0x0202);
811                 if (tg3_wait_macro_done(tp)) {
812                         *resetp = 1;
813                         return -EBUSY;
814                 }
815
816                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
817                              (chan * 0x2000) | 0x0200);
818                 tg3_writephy(tp, 0x16, 0x0082);
819                 if (tg3_wait_macro_done(tp)) {
820                         *resetp = 1;
821                         return -EBUSY;
822                 }
823
824                 tg3_writephy(tp, 0x16, 0x0802);
825                 if (tg3_wait_macro_done(tp)) {
826                         *resetp = 1;
827                         return -EBUSY;
828                 }
829
830                 for (i = 0; i < 6; i += 2) {
831                         u32 low, high;
832
833                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
834                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
835                             tg3_wait_macro_done(tp)) {
836                                 *resetp = 1;
837                                 return -EBUSY;
838                         }
839                         low &= 0x7fff;
840                         high &= 0x000f;
841                         if (low != test_pat[chan][i] ||
842                             high != test_pat[chan][i+1]) {
843                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
844                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
845                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
846
847                                 return -EBUSY;
848                         }
849                 }
850         }
851
852         return 0;
853 }
854
855 static int tg3_phy_reset_chanpat(struct tg3 *tp)
856 {
857         int chan;
858
859         for (chan = 0; chan < 4; chan++) {
860                 int i;
861
862                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
863                              (chan * 0x2000) | 0x0200);
864                 tg3_writephy(tp, 0x16, 0x0002);
865                 for (i = 0; i < 6; i++)
866                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
867                 tg3_writephy(tp, 0x16, 0x0202);
868                 if (tg3_wait_macro_done(tp))
869                         return -EBUSY;
870         }
871
872         return 0;
873 }
874
875 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
876 {
877         u32 reg32, phy9_orig;
878         int retries, do_phy_reset, err;
879
880         retries = 10;
881         do_phy_reset = 1;
882         do {
883                 if (do_phy_reset) {
884                         err = tg3_bmcr_reset(tp);
885                         if (err)
886                                 return err;
887                         do_phy_reset = 0;
888                 }
889
890                 /* Disable transmitter and interrupt.  */
891                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
892                         continue;
893
894                 reg32 |= 0x3000;
895                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
896
897                 /* Set full-duplex, 1000 mbps.  */
898                 tg3_writephy(tp, MII_BMCR,
899                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
900
901                 /* Set to master mode.  */
902                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
903                         continue;
904
905                 tg3_writephy(tp, MII_TG3_CTRL,
906                              (MII_TG3_CTRL_AS_MASTER |
907                               MII_TG3_CTRL_ENABLE_AS_MASTER));
908
909                 /* Enable SM_DSP_CLOCK and 6dB.  */
910                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
911
912                 /* Block the PHY control access.  */
913                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
914                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
915
916                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
917                 if (!err)
918                         break;
919         } while (--retries);
920
921         err = tg3_phy_reset_chanpat(tp);
922         if (err)
923                 return err;
924
925         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
926         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
927
928         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
929         tg3_writephy(tp, 0x16, 0x0000);
930
931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
933                 /* Set Extended packet length bit for jumbo frames */
934                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
935         }
936         else {
937                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
938         }
939
940         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
941
942         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
943                 reg32 &= ~0x3000;
944                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
945         } else if (!err)
946                 err = -EBUSY;
947
948         return err;
949 }
950
951 static void tg3_link_report(struct tg3 *);
952
953 /* This will reset the tigon3 PHY, regardless of the current link
954  * state.
955  */
956 static int tg3_phy_reset(struct tg3 *tp)
957 {
958         u32 phy_status;
959         int err;
960
961         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
962         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
963         if (err != 0)
964                 return -EBUSY;
965
966         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
967                 netif_carrier_off(tp->dev);
968                 tg3_link_report(tp);
969         }
970
971         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
972             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
973             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
974                 err = tg3_phy_reset_5703_4_5(tp);
975                 if (err)
976                         return err;
977                 goto out;
978         }
979
980         err = tg3_bmcr_reset(tp);
981         if (err)
982                 return err;
983
984 out:
985         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
986                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
987                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
988                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
989                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
990                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
991                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
992         }
993         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
994                 tg3_writephy(tp, 0x1c, 0x8d68);
995                 tg3_writephy(tp, 0x1c, 0x8d68);
996         }
997         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
998                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
999                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1000                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1001                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1002                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1003                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1004                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1005                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1006         }
1007         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1008                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1009                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1010                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1011                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1012         }
1013         /* Set Extended packet length bit (bit 14) on all chips
1014          * that support jumbo frames.  */
1015         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1016                 /* Cannot do read-modify-write on 5401 */
1017                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1018         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1019                 u32 phy_reg;
1020
1021                 /* Set bit 14 with read-modify-write to preserve other bits */
1022                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1023                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1024                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1025         }
1026
1027         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1028          * jumbo frames transmission.
1029          */
1030         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1031                 u32 phy_reg;
1032
1033                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1034                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1035                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1036         }
1037
1038         tg3_phy_set_wirespeed(tp);
1039         return 0;
1040 }
1041
1042 static void tg3_frob_aux_power(struct tg3 *tp)
1043 {
1044         struct tg3 *tp_peer = tp;
1045
1046         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1047                 return;
1048
1049         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1050             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1051                 struct net_device *dev_peer;
1052
1053                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1054                 /* remove_one() may have been run on the peer. */
1055                 if (!dev_peer)
1056                         tp_peer = tp;
1057                 else
1058                         tp_peer = netdev_priv(dev_peer);
1059         }
1060
1061         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1062             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1063             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1064             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1065                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1066                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1067                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1068                                     (GRC_LCLCTRL_GPIO_OE0 |
1069                                      GRC_LCLCTRL_GPIO_OE1 |
1070                                      GRC_LCLCTRL_GPIO_OE2 |
1071                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1072                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1073                                     100);
1074                 } else {
1075                         u32 no_gpio2;
1076                         u32 grc_local_ctrl = 0;
1077
1078                         if (tp_peer != tp &&
1079                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1080                                 return;
1081
1082                         /* Workaround to prevent overdrawing Amps. */
1083                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1084                             ASIC_REV_5714) {
1085                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1086                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1087                                             grc_local_ctrl, 100);
1088                         }
1089
1090                         /* On 5753 and variants, GPIO2 cannot be used. */
1091                         no_gpio2 = tp->nic_sram_data_cfg &
1092                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1093
1094                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1095                                          GRC_LCLCTRL_GPIO_OE1 |
1096                                          GRC_LCLCTRL_GPIO_OE2 |
1097                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1098                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1099                         if (no_gpio2) {
1100                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1101                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1102                         }
1103                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1104                                                     grc_local_ctrl, 100);
1105
1106                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1107
1108                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1109                                                     grc_local_ctrl, 100);
1110
1111                         if (!no_gpio2) {
1112                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1113                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114                                             grc_local_ctrl, 100);
1115                         }
1116                 }
1117         } else {
1118                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1119                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1120                         if (tp_peer != tp &&
1121                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1122                                 return;
1123
1124                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1125                                     (GRC_LCLCTRL_GPIO_OE1 |
1126                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1127
1128                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1129                                     GRC_LCLCTRL_GPIO_OE1, 100);
1130
1131                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1132                                     (GRC_LCLCTRL_GPIO_OE1 |
1133                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1134                 }
1135         }
1136 }
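/* tg3_frob_aux_power() sequences the GRC GPIOs that switch auxiliary
 * (Vaux) power.  On the dual-port 5704/5714 the GPIOs appear to be
 * shared between the two functions, so the peer's WOL/ASF state is
 * consulted first, and the output bits are raised in steps with 100 usec
 * waits in line with the "overdrawing Amps" workaround noted above.
 */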
1137
1138 static int tg3_setup_phy(struct tg3 *, int);
1139
1140 #define RESET_KIND_SHUTDOWN     0
1141 #define RESET_KIND_INIT         1
1142 #define RESET_KIND_SUSPEND      2
1143
1144 static void tg3_write_sig_post_reset(struct tg3 *, int);
1145 static int tg3_halt_cpu(struct tg3 *, u32);
1146 static int tg3_nvram_lock(struct tg3 *);
1147 static void tg3_nvram_unlock(struct tg3 *);
1148
1149 static void tg3_power_down_phy(struct tg3 *tp)
1150 {
1151         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1152                 return;
1153
1154         tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1155         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1156
1157         /* The PHY should not be powered down on some chips because
1158          * of bugs.
1159          */
1160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1161             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1162             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1163              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1164                 return;
1165         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1166 }
1167
1168 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1169 {
1170         u32 misc_host_ctrl;
1171         u16 power_control, power_caps;
1172         int pm = tp->pm_cap;
1173
1174         /* Make sure register accesses (indirect or otherwise)
1175          * will function correctly.
1176          */
1177         pci_write_config_dword(tp->pdev,
1178                                TG3PCI_MISC_HOST_CTRL,
1179                                tp->misc_host_ctrl);
1180
1181         pci_read_config_word(tp->pdev,
1182                              pm + PCI_PM_CTRL,
1183                              &power_control);
1184         power_control |= PCI_PM_CTRL_PME_STATUS;
1185         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1186         switch (state) {
1187         case PCI_D0:
1188                 power_control |= 0;
1189                 pci_write_config_word(tp->pdev,
1190                                       pm + PCI_PM_CTRL,
1191                                       power_control);
1192                 udelay(100);    /* Delay after power state change */
1193
1194                 /* Switch out of Vaux if it is not a LOM */
1195                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1196                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1197
1198                 return 0;
1199
1200         case PCI_D1:
1201                 power_control |= 1;
1202                 break;
1203
1204         case PCI_D2:
1205                 power_control |= 2;
1206                 break;
1207
1208         case PCI_D3hot:
1209                 power_control |= 3;
1210                 break;
1211
1212         default:
1213                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1214                        "requested.\n",
1215                        tp->dev->name, state);
1216                 return -EINVAL;
1217         };
1218
1219         power_control |= PCI_PM_CTRL_PME_ENABLE;
1220
1221         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1222         tw32(TG3PCI_MISC_HOST_CTRL,
1223              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1224
1225         if (tp->link_config.phy_is_low_power == 0) {
1226                 tp->link_config.phy_is_low_power = 1;
1227                 tp->link_config.orig_speed = tp->link_config.speed;
1228                 tp->link_config.orig_duplex = tp->link_config.duplex;
1229                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1230         }
1231
1232         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1233                 tp->link_config.speed = SPEED_10;
1234                 tp->link_config.duplex = DUPLEX_HALF;
1235                 tp->link_config.autoneg = AUTONEG_ENABLE;
1236                 tg3_setup_phy(tp, 0);
1237         }
1238
1239         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1240                 u32 val;
1241
1242                 val = tr32(GRC_VCPU_EXT_CTRL);
1243                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1244         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1245                 int i;
1246                 u32 val;
1247
1248                 for (i = 0; i < 200; i++) {
1249                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1250                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1251                                 break;
1252                         msleep(1);
1253                 }
1254         }
1255         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1256                                              WOL_DRV_STATE_SHUTDOWN |
1257                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1258
1259         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1260
1261         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1262                 u32 mac_mode;
1263
1264                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1265                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1266                         udelay(40);
1267
1268                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1269                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1270                         else
1271                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1272
1273                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1274                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1275                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1276                 } else {
1277                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1278                 }
1279
1280                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1281                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1282
1283                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1284                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1285                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1286
1287                 tw32_f(MAC_MODE, mac_mode);
1288                 udelay(100);
1289
1290                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1291                 udelay(10);
1292         }
1293
1294         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1295             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1296              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1297                 u32 base_val;
1298
1299                 base_val = tp->pci_clock_ctrl;
1300                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1301                              CLOCK_CTRL_TXCLK_DISABLE);
1302
1303                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1304                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1305         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1306                 /* do nothing */
1307         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1308                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1309                 u32 newbits1, newbits2;
1310
1311                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1312                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1313                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1314                                     CLOCK_CTRL_TXCLK_DISABLE |
1315                                     CLOCK_CTRL_ALTCLK);
1316                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1317                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1318                         newbits1 = CLOCK_CTRL_625_CORE;
1319                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1320                 } else {
1321                         newbits1 = CLOCK_CTRL_ALTCLK;
1322                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1323                 }
1324
1325                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1326                             40);
1327
1328                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1329                             40);
1330
1331                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1332                         u32 newbits3;
1333
1334                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1335                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1336                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1337                                             CLOCK_CTRL_TXCLK_DISABLE |
1338                                             CLOCK_CTRL_44MHZ_CORE);
1339                         } else {
1340                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1341                         }
1342
1343                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1344                                     tp->pci_clock_ctrl | newbits3, 40);
1345                 }
1346         }
1347
1348         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1349             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1350                 tg3_power_down_phy(tp);
1351
1352         tg3_frob_aux_power(tp);
1353
1354         /* Workaround for unstable PLL clock */
1355         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1356             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1357                 u32 val = tr32(0x7d00);
1358
1359                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1360                 tw32(0x7d00, val);
1361                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1362                         int err;
1363
1364                         err = tg3_nvram_lock(tp);
1365                         tg3_halt_cpu(tp, RX_CPU_BASE);
1366                         if (!err)
1367                                 tg3_nvram_unlock(tp);
1368                 }
1369         }
1370
1371         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1372
1373         /* Finally, set the new power state. */
1374         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1375         udelay(100);    /* Delay after power state change */
1376
1377         return 0;
1378 }
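/* Rough order of operations in tg3_set_power_state() on the way down:
 * save the current link configuration and drop to 10 Mb/s half-duplex
 * autoneg (unless a serdes interface is in use), signal the shutdown to
 * the firmware through the WOL mailbox, optionally keep the MAC and
 * receiver alive for magic-packet wake-up, slow or gate the core clocks,
 * power down the PHY when neither WOL nor ASF needs it, switch the GPIO
 * controlled aux power, post the shutdown signature, and finally write
 * the new D-state into the PCI PM control register.
 */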
1379
1380 static void tg3_link_report(struct tg3 *tp)
1381 {
1382         if (!netif_carrier_ok(tp->dev)) {
1383                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1384         } else {
1385                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1386                        tp->dev->name,
1387                        (tp->link_config.active_speed == SPEED_1000 ?
1388                         1000 :
1389                         (tp->link_config.active_speed == SPEED_100 ?
1390                          100 : 10)),
1391                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1392                         "full" : "half"));
1393
1394                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1395                        "%s for RX.\n",
1396                        tp->dev->name,
1397                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1398                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1399         }
1400 }
1401
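     /* Resolve TX/RX pause from the local and link partner advertisements
      * when pause autonegotiation is enabled (1000BASE-X pause bits are
      * first translated to their 1000BASE-T equivalents), then update
      * MAC_RX_MODE/MAC_TX_MODE only if the result changed.
      */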
1402 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1403 {
1404         u32 new_tg3_flags = 0;
1405         u32 old_rx_mode = tp->rx_mode;
1406         u32 old_tx_mode = tp->tx_mode;
1407
1408         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1409
1410                 /* Convert 1000BaseX flow control bits to 1000BaseT
1411                  * bits before resolving flow control.
1412                  */
1413                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1414                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1415                                        ADVERTISE_PAUSE_ASYM);
1416                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1417
1418                         if (local_adv & ADVERTISE_1000XPAUSE)
1419                                 local_adv |= ADVERTISE_PAUSE_CAP;
1420                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1421                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1422                         if (remote_adv & LPA_1000XPAUSE)
1423                                 remote_adv |= LPA_PAUSE_CAP;
1424                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1425                                 remote_adv |= LPA_PAUSE_ASYM;
1426                 }
1427
1428                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1429                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1430                                 if (remote_adv & LPA_PAUSE_CAP)
1431                                         new_tg3_flags |=
1432                                                 (TG3_FLAG_RX_PAUSE |
1433                                                 TG3_FLAG_TX_PAUSE);
1434                                 else if (remote_adv & LPA_PAUSE_ASYM)
1435                                         new_tg3_flags |=
1436                                                 (TG3_FLAG_RX_PAUSE);
1437                         } else {
1438                                 if (remote_adv & LPA_PAUSE_CAP)
1439                                         new_tg3_flags |=
1440                                                 (TG3_FLAG_RX_PAUSE |
1441                                                 TG3_FLAG_TX_PAUSE);
1442                         }
1443                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1444                         if ((remote_adv & LPA_PAUSE_CAP) &&
1445                         (remote_adv & LPA_PAUSE_ASYM))
1446                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1447                 }
1448
1449                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1450                 tp->tg3_flags |= new_tg3_flags;
1451         } else {
1452                 new_tg3_flags = tp->tg3_flags;
1453         }
1454
1455         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1456                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1457         else
1458                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1459
1460         if (old_rx_mode != tp->rx_mode) {
1461                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1462         }
1463
1464         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1465                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1466         else
1467                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1468
1469         if (old_tx_mode != tp->tx_mode) {
1470                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1471         }
1472 }
1473
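     /* Decode the MII_TG3_AUX_STAT speed/duplex field; unrecognized values
      * map to SPEED_INVALID/DUPLEX_INVALID.
      */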
1474 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1475 {
1476         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1477         case MII_TG3_AUX_STAT_10HALF:
1478                 *speed = SPEED_10;
1479                 *duplex = DUPLEX_HALF;
1480                 break;
1481
1482         case MII_TG3_AUX_STAT_10FULL:
1483                 *speed = SPEED_10;
1484                 *duplex = DUPLEX_FULL;
1485                 break;
1486
1487         case MII_TG3_AUX_STAT_100HALF:
1488                 *speed = SPEED_100;
1489                 *duplex = DUPLEX_HALF;
1490                 break;
1491
1492         case MII_TG3_AUX_STAT_100FULL:
1493                 *speed = SPEED_100;
1494                 *duplex = DUPLEX_FULL;
1495                 break;
1496
1497         case MII_TG3_AUX_STAT_1000HALF:
1498                 *speed = SPEED_1000;
1499                 *duplex = DUPLEX_HALF;
1500                 break;
1501
1502         case MII_TG3_AUX_STAT_1000FULL:
1503                 *speed = SPEED_1000;
1504                 *duplex = DUPLEX_FULL;
1505                 break;
1506
1507         default:
1508                 *speed = SPEED_INVALID;
1509                 *duplex = DUPLEX_INVALID;
1510                 break;
1511         }
1512 }
1513
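     /* Program the copper PHY advertisement registers and restart
      * autonegotiation, or force a fixed speed/duplex when autoneg is
      * disabled.  In low-power mode only 10Mb is advertised (plus 100Mb
      * if WOL at 100Mb is enabled).
      */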
1514 static void tg3_phy_copper_begin(struct tg3 *tp)
1515 {
1516         u32 new_adv;
1517         int i;
1518
1519         if (tp->link_config.phy_is_low_power) {
1520                 /* Entering low power mode.  Disable gigabit and
1521                  * 100baseT advertisements.
1522                  */
1523                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1524
1525                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1526                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1527                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1528                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1529
1530                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1531         } else if (tp->link_config.speed == SPEED_INVALID) {
1532                 tp->link_config.advertising =
1533                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1534                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1535                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1536                          ADVERTISED_Autoneg | ADVERTISED_MII);
1537
1538                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1539                         tp->link_config.advertising &=
1540                                 ~(ADVERTISED_1000baseT_Half |
1541                                   ADVERTISED_1000baseT_Full);
1542
1543                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1544                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1545                         new_adv |= ADVERTISE_10HALF;
1546                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1547                         new_adv |= ADVERTISE_10FULL;
1548                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1549                         new_adv |= ADVERTISE_100HALF;
1550                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1551                         new_adv |= ADVERTISE_100FULL;
1552                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1553
1554                 if (tp->link_config.advertising &
1555                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1556                         new_adv = 0;
1557                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1558                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1559                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1560                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1561                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1562                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1563                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1564                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1565                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1566                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1567                 } else {
1568                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1569                 }
1570         } else {
1571                 /* Asking for a specific link mode. */
1572                 if (tp->link_config.speed == SPEED_1000) {
1573                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1574                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1575
1576                         if (tp->link_config.duplex == DUPLEX_FULL)
1577                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1578                         else
1579                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1580                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1581                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1582                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1583                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1584                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1585                 } else {
1586                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1587
1588                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1589                         if (tp->link_config.speed == SPEED_100) {
1590                                 if (tp->link_config.duplex == DUPLEX_FULL)
1591                                         new_adv |= ADVERTISE_100FULL;
1592                                 else
1593                                         new_adv |= ADVERTISE_100HALF;
1594                         } else {
1595                                 if (tp->link_config.duplex == DUPLEX_FULL)
1596                                         new_adv |= ADVERTISE_10FULL;
1597                                 else
1598                                         new_adv |= ADVERTISE_10HALF;
1599                         }
1600                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1601                 }
1602         }
1603
1604         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1605             tp->link_config.speed != SPEED_INVALID) {
1606                 u32 bmcr, orig_bmcr;
1607
1608                 tp->link_config.active_speed = tp->link_config.speed;
1609                 tp->link_config.active_duplex = tp->link_config.duplex;
1610
1611                 bmcr = 0;
1612                 switch (tp->link_config.speed) {
1613                 default:
1614                 case SPEED_10:
1615                         break;
1616
1617                 case SPEED_100:
1618                         bmcr |= BMCR_SPEED100;
1619                         break;
1620
1621                 case SPEED_1000:
1622                         bmcr |= TG3_BMCR_SPEED1000;
1623                         break;
1624                 }
1625
1626                 if (tp->link_config.duplex == DUPLEX_FULL)
1627                         bmcr |= BMCR_FULLDPLX;
1628
1629                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1630                     (bmcr != orig_bmcr)) {
1631                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1632                         for (i = 0; i < 1500; i++) {
1633                                 u32 tmp;
1634
1635                                 udelay(10);
1636                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1637                                     tg3_readphy(tp, MII_BMSR, &tmp))
1638                                         continue;
1639                                 if (!(tmp & BMSR_LSTATUS)) {
1640                                         udelay(40);
1641                                         break;
1642                                 }
1643                         }
1644                         tg3_writephy(tp, MII_BMCR, bmcr);
1645                         udelay(40);
1646                 }
1647         } else {
1648                 tg3_writephy(tp, MII_BMCR,
1649                              BMCR_ANENABLE | BMCR_ANRESTART);
1650         }
1651 }
1652
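     /* BCM5401 DSP setup via vendor-specific AUX_CTRL and DSP
      * address/data register writes; returns nonzero if any PHY write
      * fails.
      */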
1653 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1654 {
1655         int err;
1656
1657         /* Turn off tap power management. */
1658         /* Set Extended packet length bit */
1659         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1660
1661         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1662         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1663
1664         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1665         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1666
1667         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1668         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1669
1670         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1671         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1672
1673         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1674         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1675
1676         udelay(40);
1677
1678         return err;
1679 }
1680
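     /* Return 1 if the PHY currently advertises the full 10/100 set
      * (and 1000 half/full unless the device is 10/100-only), 0 on a
      * partial advertisement or a PHY read error.
      */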
1681 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1682 {
1683         u32 adv_reg, all_mask;
1684
1685         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1686                 return 0;
1687
1688         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1689                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1690         if ((adv_reg & all_mask) != all_mask)
1691                 return 0;
1692         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1693                 u32 tg3_ctrl;
1694
1695                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1696                         return 0;
1697
1698                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1699                             MII_TG3_CTRL_ADV_1000_FULL);
1700                 if ((tg3_ctrl & all_mask) != all_mask)
1701                         return 0;
1702         }
1703         return 1;
1704 }
1705
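     /* Establish or re-check the link on copper devices: apply per-chip
      * PHY workarounds, poll BMSR for link, read back the negotiated
      * speed/duplex, resolve flow control and program MAC_MODE to match.
      * Carrier changes are reported through tg3_link_report().
      */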
1706 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1707 {
1708         int current_link_up;
1709         u32 bmsr, dummy;
1710         u16 current_speed;
1711         u8 current_duplex;
1712         int i, err;
1713
1714         tw32(MAC_EVENT, 0);
1715
1716         tw32_f(MAC_STATUS,
1717              (MAC_STATUS_SYNC_CHANGED |
1718               MAC_STATUS_CFG_CHANGED |
1719               MAC_STATUS_MI_COMPLETION |
1720               MAC_STATUS_LNKSTATE_CHANGED));
1721         udelay(40);
1722
1723         tp->mi_mode = MAC_MI_MODE_BASE;
1724         tw32_f(MAC_MI_MODE, tp->mi_mode);
1725         udelay(80);
1726
1727         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1728
1729         /* Some third-party PHYs need to be reset on link going
1730          * down.
1731          */
1732         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1733              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1734              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1735             netif_carrier_ok(tp->dev)) {
1736                 tg3_readphy(tp, MII_BMSR, &bmsr);
1737                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1738                     !(bmsr & BMSR_LSTATUS))
1739                         force_reset = 1;
1740         }
1741         if (force_reset)
1742                 tg3_phy_reset(tp);
1743
1744         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1745                 tg3_readphy(tp, MII_BMSR, &bmsr);
1746                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1747                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1748                         bmsr = 0;
1749
1750                 if (!(bmsr & BMSR_LSTATUS)) {
1751                         err = tg3_init_5401phy_dsp(tp);
1752                         if (err)
1753                                 return err;
1754
1755                         tg3_readphy(tp, MII_BMSR, &bmsr);
1756                         for (i = 0; i < 1000; i++) {
1757                                 udelay(10);
1758                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1759                                     (bmsr & BMSR_LSTATUS)) {
1760                                         udelay(40);
1761                                         break;
1762                                 }
1763                         }
1764
1765                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1766                             !(bmsr & BMSR_LSTATUS) &&
1767                             tp->link_config.active_speed == SPEED_1000) {
1768                                 err = tg3_phy_reset(tp);
1769                                 if (!err)
1770                                         err = tg3_init_5401phy_dsp(tp);
1771                                 if (err)
1772                                         return err;
1773                         }
1774                 }
1775         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1776                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1777                 /* 5701 {A0,B0} CRC bug workaround */
1778                 tg3_writephy(tp, 0x15, 0x0a75);
1779                 tg3_writephy(tp, 0x1c, 0x8c68);
1780                 tg3_writephy(tp, 0x1c, 0x8d68);
1781                 tg3_writephy(tp, 0x1c, 0x8c68);
1782         }
1783
1784         /* Clear pending interrupts... */
1785         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1786         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1787
1788         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1789                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1790         else
1791                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1792
1793         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1794             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1795                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1796                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1797                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1798                 else
1799                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1800         }
1801
1802         current_link_up = 0;
1803         current_speed = SPEED_INVALID;
1804         current_duplex = DUPLEX_INVALID;
1805
1806         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1807                 u32 val;
1808
1809                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1810                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1811                 if (!(val & (1 << 10))) {
1812                         val |= (1 << 10);
1813                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1814                         goto relink;
1815                 }
1816         }
1817
1818         bmsr = 0;
1819         for (i = 0; i < 100; i++) {
1820                 tg3_readphy(tp, MII_BMSR, &bmsr);
1821                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1822                     (bmsr & BMSR_LSTATUS))
1823                         break;
1824                 udelay(40);
1825         }
1826
1827         if (bmsr & BMSR_LSTATUS) {
1828                 u32 aux_stat, bmcr;
1829
1830                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1831                 for (i = 0; i < 2000; i++) {
1832                         udelay(10);
1833                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1834                             aux_stat)
1835                                 break;
1836                 }
1837
1838                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1839                                              &current_speed,
1840                                              &current_duplex);
1841
1842                 bmcr = 0;
1843                 for (i = 0; i < 200; i++) {
1844                         tg3_readphy(tp, MII_BMCR, &bmcr);
1845                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1846                                 continue;
1847                         if (bmcr && bmcr != 0x7fff)
1848                                 break;
1849                         udelay(10);
1850                 }
1851
1852                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1853                         if (bmcr & BMCR_ANENABLE) {
1854                                 current_link_up = 1;
1855
1856                                 /* Force autoneg restart if we are exiting
1857                                  * low power mode.
1858                                  */
1859                                 if (!tg3_copper_is_advertising_all(tp))
1860                                         current_link_up = 0;
1861                         } else {
1862                                 current_link_up = 0;
1863                         }
1864                 } else {
1865                         if (!(bmcr & BMCR_ANENABLE) &&
1866                             tp->link_config.speed == current_speed &&
1867                             tp->link_config.duplex == current_duplex) {
1868                                 current_link_up = 1;
1869                         } else {
1870                                 current_link_up = 0;
1871                         }
1872                 }
1873
1874                 tp->link_config.active_speed = current_speed;
1875                 tp->link_config.active_duplex = current_duplex;
1876         }
1877
1878         if (current_link_up == 1 &&
1879             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1880             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1881                 u32 local_adv, remote_adv;
1882
1883                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1884                         local_adv = 0;
1885                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1886
1887                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1888                         remote_adv = 0;
1889
1890                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1891
1892                 /* If we are not advertising full pause capability,
1893                  * something is wrong.  Bring the link down and reconfigure.
1894                  */
1895                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1896                         current_link_up = 0;
1897                 } else {
1898                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1899                 }
1900         }
1901 relink:
1902         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1903                 u32 tmp;
1904
1905                 tg3_phy_copper_begin(tp);
1906
1907                 tg3_readphy(tp, MII_BMSR, &tmp);
1908                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1909                     (tmp & BMSR_LSTATUS))
1910                         current_link_up = 1;
1911         }
1912
1913         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1914         if (current_link_up == 1) {
1915                 if (tp->link_config.active_speed == SPEED_100 ||
1916                     tp->link_config.active_speed == SPEED_10)
1917                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1918                 else
1919                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1920         } else
1921                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1922
1923         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1924         if (tp->link_config.active_duplex == DUPLEX_HALF)
1925                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1926
1927         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1928         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1929                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1930                     (current_link_up == 1 &&
1931                      tp->link_config.active_speed == SPEED_10))
1932                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1933         } else {
1934                 if (current_link_up == 1)
1935                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1936         }
1937
1938         /* ??? Without this setting Netgear GA302T PHY does not
1939          * ??? send/receive packets...
1940          */
1941         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1942             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1943                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1944                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1945                 udelay(80);
1946         }
1947
1948         tw32_f(MAC_MODE, tp->mac_mode);
1949         udelay(40);
1950
1951         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1952                 /* Polled via timer. */
1953                 tw32_f(MAC_EVENT, 0);
1954         } else {
1955                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1956         }
1957         udelay(40);
1958
1959         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1960             current_link_up == 1 &&
1961             tp->link_config.active_speed == SPEED_1000 &&
1962             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1963              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1964                 udelay(120);
1965                 tw32_f(MAC_STATUS,
1966                      (MAC_STATUS_SYNC_CHANGED |
1967                       MAC_STATUS_CFG_CHANGED));
1968                 udelay(40);
1969                 tg3_write_mem(tp,
1970                               NIC_SRAM_FIRMWARE_MBOX,
1971                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1972         }
1973
1974         if (current_link_up != netif_carrier_ok(tp->dev)) {
1975                 if (current_link_up)
1976                         netif_carrier_on(tp->dev);
1977                 else
1978                         netif_carrier_off(tp->dev);
1979                 tg3_link_report(tp);
1980         }
1981
1982         return 0;
1983 }
1984
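     /* State for the software fiber autonegotiation machine below; the
      * MR_* flags are modeled on the management-register variables of the
      * IEEE 802.3 1000BASE-X (clause 37) autoneg description.
      */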
1985 struct tg3_fiber_aneginfo {
1986         int state;
1987 #define ANEG_STATE_UNKNOWN              0
1988 #define ANEG_STATE_AN_ENABLE            1
1989 #define ANEG_STATE_RESTART_INIT         2
1990 #define ANEG_STATE_RESTART              3
1991 #define ANEG_STATE_DISABLE_LINK_OK      4
1992 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1993 #define ANEG_STATE_ABILITY_DETECT       6
1994 #define ANEG_STATE_ACK_DETECT_INIT      7
1995 #define ANEG_STATE_ACK_DETECT           8
1996 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1997 #define ANEG_STATE_COMPLETE_ACK         10
1998 #define ANEG_STATE_IDLE_DETECT_INIT     11
1999 #define ANEG_STATE_IDLE_DETECT          12
2000 #define ANEG_STATE_LINK_OK              13
2001 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2002 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2003
2004         u32 flags;
2005 #define MR_AN_ENABLE            0x00000001
2006 #define MR_RESTART_AN           0x00000002
2007 #define MR_AN_COMPLETE          0x00000004
2008 #define MR_PAGE_RX              0x00000008
2009 #define MR_NP_LOADED            0x00000010
2010 #define MR_TOGGLE_TX            0x00000020
2011 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2012 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2013 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2014 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2015 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2016 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2017 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2018 #define MR_TOGGLE_RX            0x00002000
2019 #define MR_NP_RX                0x00004000
2020
2021 #define MR_LINK_OK              0x80000000
2022
2023         unsigned long link_time, cur_time;
2024
2025         u32 ability_match_cfg;
2026         int ability_match_count;
2027
2028         char ability_match, idle_match, ack_match;
2029
2030         u32 txconfig, rxconfig;
2031 #define ANEG_CFG_NP             0x00000080
2032 #define ANEG_CFG_ACK            0x00000040
2033 #define ANEG_CFG_RF2            0x00000020
2034 #define ANEG_CFG_RF1            0x00000010
2035 #define ANEG_CFG_PS2            0x00000001
2036 #define ANEG_CFG_PS1            0x00008000
2037 #define ANEG_CFG_HD             0x00004000
2038 #define ANEG_CFG_FD             0x00002000
2039 #define ANEG_CFG_INVAL          0x00001f06
2040
2041 };
2042 #define ANEG_OK         0
2043 #define ANEG_DONE       1
2044 #define ANEG_TIMER_ENAB 2
2045 #define ANEG_FAILED     -1
2046
2047 #define ANEG_STATE_SETTLE_TIME  10000
2048
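     /* Advance the software 1000BASE-X autoneg state machine by one step:
      * sample MAC_RX_AUTO_NEG for ability/ack/idle matching, update
      * ap->state, and return ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or
      * ANEG_FAILED.
      */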
2049 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2050                                    struct tg3_fiber_aneginfo *ap)
2051 {
2052         unsigned long delta;
2053         u32 rx_cfg_reg;
2054         int ret;
2055
2056         if (ap->state == ANEG_STATE_UNKNOWN) {
2057                 ap->rxconfig = 0;
2058                 ap->link_time = 0;
2059                 ap->cur_time = 0;
2060                 ap->ability_match_cfg = 0;
2061                 ap->ability_match_count = 0;
2062                 ap->ability_match = 0;
2063                 ap->idle_match = 0;
2064                 ap->ack_match = 0;
2065         }
2066         ap->cur_time++;
2067
2068         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2069                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2070
2071                 if (rx_cfg_reg != ap->ability_match_cfg) {
2072                         ap->ability_match_cfg = rx_cfg_reg;
2073                         ap->ability_match = 0;
2074                         ap->ability_match_count = 0;
2075                 } else {
2076                         if (++ap->ability_match_count > 1) {
2077                                 ap->ability_match = 1;
2078                                 ap->ability_match_cfg = rx_cfg_reg;
2079                         }
2080                 }
2081                 if (rx_cfg_reg & ANEG_CFG_ACK)
2082                         ap->ack_match = 1;
2083                 else
2084                         ap->ack_match = 0;
2085
2086                 ap->idle_match = 0;
2087         } else {
2088                 ap->idle_match = 1;
2089                 ap->ability_match_cfg = 0;
2090                 ap->ability_match_count = 0;
2091                 ap->ability_match = 0;
2092                 ap->ack_match = 0;
2093
2094                 rx_cfg_reg = 0;
2095         }
2096
2097         ap->rxconfig = rx_cfg_reg;
2098         ret = ANEG_OK;
2099
2100         switch (ap->state) {
2101         case ANEG_STATE_UNKNOWN:
2102                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2103                         ap->state = ANEG_STATE_AN_ENABLE;
2104
2105                 /* fallthru */
2106         case ANEG_STATE_AN_ENABLE:
2107                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2108                 if (ap->flags & MR_AN_ENABLE) {
2109                         ap->link_time = 0;
2110                         ap->cur_time = 0;
2111                         ap->ability_match_cfg = 0;
2112                         ap->ability_match_count = 0;
2113                         ap->ability_match = 0;
2114                         ap->idle_match = 0;
2115                         ap->ack_match = 0;
2116
2117                         ap->state = ANEG_STATE_RESTART_INIT;
2118                 } else {
2119                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2120                 }
2121                 break;
2122
2123         case ANEG_STATE_RESTART_INIT:
2124                 ap->link_time = ap->cur_time;
2125                 ap->flags &= ~(MR_NP_LOADED);
2126                 ap->txconfig = 0;
2127                 tw32(MAC_TX_AUTO_NEG, 0);
2128                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2129                 tw32_f(MAC_MODE, tp->mac_mode);
2130                 udelay(40);
2131
2132                 ret = ANEG_TIMER_ENAB;
2133                 ap->state = ANEG_STATE_RESTART;
2134
2135                 /* fallthru */
2136         case ANEG_STATE_RESTART:
2137                 delta = ap->cur_time - ap->link_time;
2138                 if (delta > ANEG_STATE_SETTLE_TIME) {
2139                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2140                 } else {
2141                         ret = ANEG_TIMER_ENAB;
2142                 }
2143                 break;
2144
2145         case ANEG_STATE_DISABLE_LINK_OK:
2146                 ret = ANEG_DONE;
2147                 break;
2148
2149         case ANEG_STATE_ABILITY_DETECT_INIT:
2150                 ap->flags &= ~(MR_TOGGLE_TX);
2151                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2152                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2153                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2154                 tw32_f(MAC_MODE, tp->mac_mode);
2155                 udelay(40);
2156
2157                 ap->state = ANEG_STATE_ABILITY_DETECT;
2158                 break;
2159
2160         case ANEG_STATE_ABILITY_DETECT:
2161                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2162                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2163                 }
2164                 break;
2165
2166         case ANEG_STATE_ACK_DETECT_INIT:
2167                 ap->txconfig |= ANEG_CFG_ACK;
2168                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2169                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2170                 tw32_f(MAC_MODE, tp->mac_mode);
2171                 udelay(40);
2172
2173                 ap->state = ANEG_STATE_ACK_DETECT;
2174
2175                 /* fallthru */
2176         case ANEG_STATE_ACK_DETECT:
2177                 if (ap->ack_match != 0) {
2178                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2179                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2180                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2181                         } else {
2182                                 ap->state = ANEG_STATE_AN_ENABLE;
2183                         }
2184                 } else if (ap->ability_match != 0 &&
2185                            ap->rxconfig == 0) {
2186                         ap->state = ANEG_STATE_AN_ENABLE;
2187                 }
2188                 break;
2189
2190         case ANEG_STATE_COMPLETE_ACK_INIT:
2191                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2192                         ret = ANEG_FAILED;
2193                         break;
2194                 }
2195                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2196                                MR_LP_ADV_HALF_DUPLEX |
2197                                MR_LP_ADV_SYM_PAUSE |
2198                                MR_LP_ADV_ASYM_PAUSE |
2199                                MR_LP_ADV_REMOTE_FAULT1 |
2200                                MR_LP_ADV_REMOTE_FAULT2 |
2201                                MR_LP_ADV_NEXT_PAGE |
2202                                MR_TOGGLE_RX |
2203                                MR_NP_RX);
2204                 if (ap->rxconfig & ANEG_CFG_FD)
2205                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2206                 if (ap->rxconfig & ANEG_CFG_HD)
2207                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2208                 if (ap->rxconfig & ANEG_CFG_PS1)
2209                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2210                 if (ap->rxconfig & ANEG_CFG_PS2)
2211                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2212                 if (ap->rxconfig & ANEG_CFG_RF1)
2213                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2214                 if (ap->rxconfig & ANEG_CFG_RF2)
2215                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2216                 if (ap->rxconfig & ANEG_CFG_NP)
2217                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2218
2219                 ap->link_time = ap->cur_time;
2220
2221                 ap->flags ^= (MR_TOGGLE_TX);
2222                 if (ap->rxconfig & 0x0008)
2223                         ap->flags |= MR_TOGGLE_RX;
2224                 if (ap->rxconfig & ANEG_CFG_NP)
2225                         ap->flags |= MR_NP_RX;
2226                 ap->flags |= MR_PAGE_RX;
2227
2228                 ap->state = ANEG_STATE_COMPLETE_ACK;
2229                 ret = ANEG_TIMER_ENAB;
2230                 break;
2231
2232         case ANEG_STATE_COMPLETE_ACK:
2233                 if (ap->ability_match != 0 &&
2234                     ap->rxconfig == 0) {
2235                         ap->state = ANEG_STATE_AN_ENABLE;
2236                         break;
2237                 }
2238                 delta = ap->cur_time - ap->link_time;
2239                 if (delta > ANEG_STATE_SETTLE_TIME) {
2240                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2241                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2242                         } else {
2243                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2244                                     !(ap->flags & MR_NP_RX)) {
2245                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2246                                 } else {
2247                                         ret = ANEG_FAILED;
2248                                 }
2249                         }
2250                 }
2251                 break;
2252
2253         case ANEG_STATE_IDLE_DETECT_INIT:
2254                 ap->link_time = ap->cur_time;
2255                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2256                 tw32_f(MAC_MODE, tp->mac_mode);
2257                 udelay(40);
2258
2259                 ap->state = ANEG_STATE_IDLE_DETECT;
2260                 ret = ANEG_TIMER_ENAB;
2261                 break;
2262
2263         case ANEG_STATE_IDLE_DETECT:
2264                 if (ap->ability_match != 0 &&
2265                     ap->rxconfig == 0) {
2266                         ap->state = ANEG_STATE_AN_ENABLE;
2267                         break;
2268                 }
2269                 delta = ap->cur_time - ap->link_time;
2270                 if (delta > ANEG_STATE_SETTLE_TIME) {
2271                         /* XXX another gem from the Broadcom driver :( */
2272                         ap->state = ANEG_STATE_LINK_OK;
2273                 }
2274                 break;
2275
2276         case ANEG_STATE_LINK_OK:
2277                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2278                 ret = ANEG_DONE;
2279                 break;
2280
2281         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2282                 /* ??? unimplemented */
2283                 break;
2284
2285         case ANEG_STATE_NEXT_PAGE_WAIT:
2286                 /* ??? unimplemented */
2287                 break;
2288
2289         default:
2290                 ret = ANEG_FAILED;
2291                 break;
2292         }
2293
2294         return ret;
2295 }
2296
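     /* Run the software autoneg state machine to completion, bounded at
      * roughly 195ms of 1us steps.  *flags receives the negotiated MR_*
      * bits; returns 1 when autonegotiation finished with a usable
      * result, 0 otherwise.
      */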
2297 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2298 {
2299         int res = 0;
2300         struct tg3_fiber_aneginfo aninfo;
2301         int status = ANEG_FAILED;
2302         unsigned int tick;
2303         u32 tmp;
2304
2305         tw32_f(MAC_TX_AUTO_NEG, 0);
2306
2307         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2308         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2309         udelay(40);
2310
2311         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2312         udelay(40);
2313
2314         memset(&aninfo, 0, sizeof(aninfo));
2315         aninfo.flags |= MR_AN_ENABLE;
2316         aninfo.state = ANEG_STATE_UNKNOWN;
2317         aninfo.cur_time = 0;
2318         tick = 0;
2319         while (++tick < 195000) {
2320                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2321                 if (status == ANEG_DONE || status == ANEG_FAILED)
2322                         break;
2323
2324                 udelay(1);
2325         }
2326
2327         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2328         tw32_f(MAC_MODE, tp->mac_mode);
2329         udelay(40);
2330
2331         *flags = aninfo.flags;
2332
2333         if (status == ANEG_DONE &&
2334             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2335                              MR_LP_ADV_FULL_DUPLEX)))
2336                 res = 1;
2337
2338         return res;
2339 }
2340
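     /* Reset and reconfigure the BCM8002 SerDes PHY: set the PLL lock
      * range, soft-reset, enable auto-lock/comdet and toggle POR, all via
      * raw vendor register writes.
      */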
2341 static void tg3_init_bcm8002(struct tg3 *tp)
2342 {
2343         u32 mac_status = tr32(MAC_STATUS);
2344         int i;
2345
2346         /* Reset when initializing for the first time or when we have a link. */
2347         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2348             !(mac_status & MAC_STATUS_PCS_SYNCED))
2349                 return;
2350
2351         /* Set PLL lock range. */
2352         tg3_writephy(tp, 0x16, 0x8007);
2353
2354         /* SW reset */
2355         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2356
2357         /* Wait for reset to complete. */
2358         /* XXX schedule_timeout() ... */
2359         for (i = 0; i < 500; i++)
2360                 udelay(10);
2361
2362         /* Config mode; select PMA/Ch 1 regs. */
2363         tg3_writephy(tp, 0x10, 0x8411);
2364
2365         /* Enable auto-lock and comdet, select txclk for tx. */
2366         tg3_writephy(tp, 0x11, 0x0a10);
2367
2368         tg3_writephy(tp, 0x18, 0x00a0);
2369         tg3_writephy(tp, 0x16, 0x41ff);
2370
2371         /* Assert and deassert POR. */
2372         tg3_writephy(tp, 0x13, 0x0400);
2373         udelay(40);
2374         tg3_writephy(tp, 0x13, 0x0000);
2375
2376         tg3_writephy(tp, 0x11, 0x0a50);
2377         udelay(40);
2378         tg3_writephy(tp, 0x11, 0x0a10);
2379
2380         /* Wait for signal to stabilize */
2381         /* XXX schedule_timeout() ... */
2382         for (i = 0; i < 15000; i++)
2383                 udelay(10);
2384
2385         /* Deselect the channel register so we can read the PHYID
2386          * later.
2387          */
2388         tg3_writephy(tp, 0x10, 0x8011);
2389 }
2390
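     /* Fiber link setup using the hardware SerDes autoneg block: program
      * SG_DIG_CTRL (with a MAC_SERDES_CFG workaround on the revisions
      * that need it), resolve pause from SG_DIG_STATUS, and fall back to
      * parallel detection when no config codes are received.  Returns 1
      * if the link came up.
      */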
2391 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2392 {
2393         u32 sg_dig_ctrl, sg_dig_status;
2394         u32 serdes_cfg, expected_sg_dig_ctrl;
2395         int workaround, port_a;
2396         int current_link_up;
2397
2398         serdes_cfg = 0;
2399         expected_sg_dig_ctrl = 0;
2400         workaround = 0;
2401         port_a = 1;
2402         current_link_up = 0;
2403
2404         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2405             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2406                 workaround = 1;
2407                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2408                         port_a = 0;
2409
2410                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2411                 /* preserve bits 20-23 for voltage regulator */
2412                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2413         }
2414
2415         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2416
2417         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2418                 if (sg_dig_ctrl & (1 << 31)) {
2419                         if (workaround) {
2420                                 u32 val = serdes_cfg;
2421
2422                                 if (port_a)
2423                                         val |= 0xc010000;
2424                                 else
2425                                         val |= 0x4010000;
2426                                 tw32_f(MAC_SERDES_CFG, val);
2427                         }
2428                         tw32_f(SG_DIG_CTRL, 0x01388400);
2429                 }
2430                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2431                         tg3_setup_flow_control(tp, 0, 0);
2432                         current_link_up = 1;
2433                 }
2434                 goto out;
2435         }
2436
2437         /* Want auto-negotiation.  */
2438         expected_sg_dig_ctrl = 0x81388400;
2439
2440         /* Pause capability */
2441         expected_sg_dig_ctrl |= (1 << 11);
2442
2443         /* Asymmetric pause */
2444         expected_sg_dig_ctrl |= (1 << 12);
2445
2446         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2447                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2448                     tp->serdes_counter &&
2449                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2450                                     MAC_STATUS_RCVD_CFG)) ==
2451                      MAC_STATUS_PCS_SYNCED)) {
2452                         tp->serdes_counter--;
2453                         current_link_up = 1;
2454                         goto out;
2455                 }
2456 restart_autoneg:
2457                 if (workaround)
2458                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2459                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2460                 udelay(5);
2461                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2462
2463                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2464                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2465         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2466                                  MAC_STATUS_SIGNAL_DET)) {
2467                 sg_dig_status = tr32(SG_DIG_STATUS);
2468                 mac_status = tr32(MAC_STATUS);
2469
2470                 if ((sg_dig_status & (1 << 1)) &&
2471                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2472                         u32 local_adv, remote_adv;
2473
2474                         local_adv = ADVERTISE_PAUSE_CAP;
2475                         remote_adv = 0;
2476                         if (sg_dig_status & (1 << 19))
2477                                 remote_adv |= LPA_PAUSE_CAP;
2478                         if (sg_dig_status & (1 << 20))
2479                                 remote_adv |= LPA_PAUSE_ASYM;
2480
2481                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2482                         current_link_up = 1;
2483                         tp->serdes_counter = 0;
2484                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2485                 } else if (!(sg_dig_status & (1 << 1))) {
2486                         if (tp->serdes_counter)
2487                                 tp->serdes_counter--;
2488                         else {
2489                                 if (workaround) {
2490                                         u32 val = serdes_cfg;
2491
2492                                         if (port_a)
2493                                                 val |= 0xc010000;
2494                                         else
2495                                                 val |= 0x4010000;
2496
2497                                         tw32_f(MAC_SERDES_CFG, val);
2498                                 }
2499
2500                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2501                                 udelay(40);
2502
2503                                 /* Link parallel detection - link is up
2504                                  * only if we have PCS_SYNC and are not
2505                                  * receiving config code words.  */
2506                                 mac_status = tr32(MAC_STATUS);
2507                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2508                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2509                                         tg3_setup_flow_control(tp, 0, 0);
2510                                         current_link_up = 1;
2511                                         tp->tg3_flags2 |=
2512                                                 TG3_FLG2_PARALLEL_DETECT;
2513                                         tp->serdes_counter =
2514                                                 SERDES_PARALLEL_DET_TIMEOUT;
2515                                 } else
2516                                         goto restart_autoneg;
2517                         }
2518                 }
2519         } else {
2520                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2521                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2522         }
2523
2524 out:
2525         return current_link_up;
2526 }
2527
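     /* Fiber link setup without the hardware autoneg block: either run
      * the software state machine via fiber_autoneg() or force a 1000FD
      * link, then wait for MAC_STATUS to settle.  Returns 1 if the link
      * came up.
      */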
2528 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2529 {
2530         int current_link_up = 0;
2531
2532         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2533                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2534                 goto out;
2535         }
2536
2537         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2538                 u32 flags;
2539                 int i;
2540
2541                 if (fiber_autoneg(tp, &flags)) {
2542                         u32 local_adv, remote_adv;
2543
2544                         local_adv = ADVERTISE_PAUSE_CAP;
2545                         remote_adv = 0;
2546                         if (flags & MR_LP_ADV_SYM_PAUSE)
2547                                 remote_adv |= LPA_PAUSE_CAP;
2548                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2549                                 remote_adv |= LPA_PAUSE_ASYM;
2550
2551                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2552
2553                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2554                         current_link_up = 1;
2555                 }
2556                 for (i = 0; i < 30; i++) {
2557                         udelay(20);
2558                         tw32_f(MAC_STATUS,
2559                                (MAC_STATUS_SYNC_CHANGED |
2560                                 MAC_STATUS_CFG_CHANGED));
2561                         udelay(40);
2562                         if ((tr32(MAC_STATUS) &
2563                              (MAC_STATUS_SYNC_CHANGED |
2564                               MAC_STATUS_CFG_CHANGED)) == 0)
2565                                 break;
2566                 }
2567
2568                 mac_status = tr32(MAC_STATUS);
2569                 if (current_link_up == 0 &&
2570                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2571                     !(mac_status & MAC_STATUS_RCVD_CFG))
2572                         current_link_up = 1;
2573         } else {
2574                 /* Forcing 1000FD link up. */
2575                 current_link_up = 1;
2576                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2577
2578                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2579                 udelay(40);
2580         }
2581
2582 out:
2583         return current_link_up;
2584 }
2585
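     /* Top-level link setup for TBI/fiber devices: pick hardware or
      * software autoneg, update MAC_MODE and the link LED, and report
      * carrier or pause/speed/duplex changes.
      */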
2586 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2587 {
2588         u32 orig_pause_cfg;
2589         u16 orig_active_speed;
2590         u8 orig_active_duplex;
2591         u32 mac_status;
2592         int current_link_up;
2593         int i;
2594
2595         orig_pause_cfg =
2596                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2597                                   TG3_FLAG_TX_PAUSE));
2598         orig_active_speed = tp->link_config.active_speed;
2599         orig_active_duplex = tp->link_config.active_duplex;
2600
2601         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2602             netif_carrier_ok(tp->dev) &&
2603             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2604                 mac_status = tr32(MAC_STATUS);
2605                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2606                                MAC_STATUS_SIGNAL_DET |
2607                                MAC_STATUS_CFG_CHANGED |
2608                                MAC_STATUS_RCVD_CFG);
2609                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2610                                    MAC_STATUS_SIGNAL_DET)) {
2611                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2612                                             MAC_STATUS_CFG_CHANGED));
2613                         return 0;
2614                 }
2615         }
2616
2617         tw32_f(MAC_TX_AUTO_NEG, 0);
2618
2619         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2620         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2621         tw32_f(MAC_MODE, tp->mac_mode);
2622         udelay(40);
2623
2624         if (tp->phy_id == PHY_ID_BCM8002)
2625                 tg3_init_bcm8002(tp);
2626
2627         /* Enable link change event even when serdes polling.  */
2628         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2629         udelay(40);
2630
2631         current_link_up = 0;
2632         mac_status = tr32(MAC_STATUS);
2633
2634         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2635                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2636         else
2637                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2638
2639         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2640         tw32_f(MAC_MODE, tp->mac_mode);
2641         udelay(40);
2642
2643         tp->hw_status->status =
2644                 (SD_STATUS_UPDATED |
2645                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2646
2647         for (i = 0; i < 100; i++) {
2648                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2649                                     MAC_STATUS_CFG_CHANGED));
2650                 udelay(5);
2651                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2652                                          MAC_STATUS_CFG_CHANGED |
2653                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2654                         break;
2655         }
2656
2657         mac_status = tr32(MAC_STATUS);
2658         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2659                 current_link_up = 0;
2660                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2661                     tp->serdes_counter == 0) {
2662                         tw32_f(MAC_MODE, (tp->mac_mode |
2663                                           MAC_MODE_SEND_CONFIGS));
2664                         udelay(1);
2665                         tw32_f(MAC_MODE, tp->mac_mode);
2666                 }
2667         }
2668
2669         if (current_link_up == 1) {
2670                 tp->link_config.active_speed = SPEED_1000;
2671                 tp->link_config.active_duplex = DUPLEX_FULL;
2672                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2673                                     LED_CTRL_LNKLED_OVERRIDE |
2674                                     LED_CTRL_1000MBPS_ON));
2675         } else {
2676                 tp->link_config.active_speed = SPEED_INVALID;
2677                 tp->link_config.active_duplex = DUPLEX_INVALID;
2678                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2679                                     LED_CTRL_LNKLED_OVERRIDE |
2680                                     LED_CTRL_TRAFFIC_OVERRIDE));
2681         }
2682
2683         if (current_link_up != netif_carrier_ok(tp->dev)) {
2684                 if (current_link_up)
2685                         netif_carrier_on(tp->dev);
2686                 else
2687                         netif_carrier_off(tp->dev);
2688                 tg3_link_report(tp);
2689         } else {
2690                 u32 now_pause_cfg =
2691                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2692                                          TG3_FLAG_TX_PAUSE);
2693                 if (orig_pause_cfg != now_pause_cfg ||
2694                     orig_active_speed != tp->link_config.active_speed ||
2695                     orig_active_duplex != tp->link_config.active_duplex)
2696                         tg3_link_report(tp);
2697         }
2698
2699         return 0;
2700 }
2701
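     /* Link setup for SerDes devices reached through MII registers:
      * follows the copper flow but advertises the 1000BASE-X ability
      * bits; on 5714 the MAC TX status supplies the link indication.
      */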
2702 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2703 {
2704         int current_link_up, err = 0;
2705         u32 bmsr, bmcr;
2706         u16 current_speed;
2707         u8 current_duplex;
2708
2709         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2710         tw32_f(MAC_MODE, tp->mac_mode);
2711         udelay(40);
2712
2713         tw32(MAC_EVENT, 0);
2714
2715         tw32_f(MAC_STATUS,
2716              (MAC_STATUS_SYNC_CHANGED |
2717               MAC_STATUS_CFG_CHANGED |
2718               MAC_STATUS_MI_COMPLETION |
2719               MAC_STATUS_LNKSTATE_CHANGED));
2720         udelay(40);
2721
2722         if (force_reset)
2723                 tg3_phy_reset(tp);
2724
2725         current_link_up = 0;
2726         current_speed = SPEED_INVALID;
2727         current_duplex = DUPLEX_INVALID;
2728
2729         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2730         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2731         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2732                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2733                         bmsr |= BMSR_LSTATUS;
2734                 else
2735                         bmsr &= ~BMSR_LSTATUS;
2736         }
2737
2738         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2739
2740         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2741             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2742                 /* do nothing, just check for link up at the end */
2743         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2744                 u32 adv, new_adv;
2745
2746                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2747                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2748                                   ADVERTISE_1000XPAUSE |
2749                                   ADVERTISE_1000XPSE_ASYM |
2750                                   ADVERTISE_SLCT);
2751
2752                 /* Always advertise symmetric PAUSE just like copper */
2753                 new_adv |= ADVERTISE_1000XPAUSE;
2754
2755                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2756                         new_adv |= ADVERTISE_1000XHALF;
2757                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2758                         new_adv |= ADVERTISE_1000XFULL;
2759
2760                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2761                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2762                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2763                         tg3_writephy(tp, MII_BMCR, bmcr);
2764
2765                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2766                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2767                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2768
2769                         return err;
2770                 }
2771         } else {
2772                 u32 new_bmcr;
2773
2774                 bmcr &= ~BMCR_SPEED1000;
2775                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2776
2777                 if (tp->link_config.duplex == DUPLEX_FULL)
2778                         new_bmcr |= BMCR_FULLDPLX;
2779
2780                 if (new_bmcr != bmcr) {
2781                         /* BMCR_SPEED1000 is a reserved bit that needs
2782                          * to be set on write.
2783                          */
2784                         new_bmcr |= BMCR_SPEED1000;
2785
2786                         /* Force a linkdown */
2787                         if (netif_carrier_ok(tp->dev)) {
2788                                 u32 adv;
2789
2790                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2791                                 adv &= ~(ADVERTISE_1000XFULL |
2792                                          ADVERTISE_1000XHALF |
2793                                          ADVERTISE_SLCT);
2794                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2795                                 tg3_writephy(tp, MII_BMCR, bmcr |
2796                                                            BMCR_ANRESTART |
2797                                                            BMCR_ANENABLE);
2798                                 udelay(10);
2799                                 netif_carrier_off(tp->dev);
2800                         }
2801                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2802                         bmcr = new_bmcr;
2803                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2804                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2805                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2806                             ASIC_REV_5714) {
2807                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2808                                         bmsr |= BMSR_LSTATUS;
2809                                 else
2810                                         bmsr &= ~BMSR_LSTATUS;
2811                         }
2812                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2813                 }
2814         }
2815
2816         if (bmsr & BMSR_LSTATUS) {
2817                 current_speed = SPEED_1000;
2818                 current_link_up = 1;
2819                 if (bmcr & BMCR_FULLDPLX)
2820                         current_duplex = DUPLEX_FULL;
2821                 else
2822                         current_duplex = DUPLEX_HALF;
2823
2824                 if (bmcr & BMCR_ANENABLE) {
2825                         u32 local_adv, remote_adv, common;
2826
2827                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2828                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2829                         common = local_adv & remote_adv;
2830                         if (common & (ADVERTISE_1000XHALF |
2831                                       ADVERTISE_1000XFULL)) {
2832                                 if (common & ADVERTISE_1000XFULL)
2833                                         current_duplex = DUPLEX_FULL;
2834                                 else
2835                                         current_duplex = DUPLEX_HALF;
2836
2837                                 tg3_setup_flow_control(tp, local_adv,
2838                                                        remote_adv);
2839                         }
2840                         else
2841                                 current_link_up = 0;
2842                 }
2843         }
2844
2845         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2846         if (tp->link_config.active_duplex == DUPLEX_HALF)
2847                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2848
2849         tw32_f(MAC_MODE, tp->mac_mode);
2850         udelay(40);
2851
2852         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2853
2854         tp->link_config.active_speed = current_speed;
2855         tp->link_config.active_duplex = current_duplex;
2856
2857         if (current_link_up != netif_carrier_ok(tp->dev)) {
2858                 if (current_link_up)
2859                         netif_carrier_on(tp->dev);
2860                 else {
2861                         netif_carrier_off(tp->dev);
2862                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2863                 }
2864                 tg3_link_report(tp);
2865         }
2866         return err;
2867 }
2868
2869 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2870 {
2871         if (tp->serdes_counter) {
2872                 /* Give autoneg time to complete. */
2873                 tp->serdes_counter--;
2874                 return;
2875         }
2876         if (!netif_carrier_ok(tp->dev) &&
2877             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2878                 u32 bmcr;
2879
2880                 tg3_readphy(tp, MII_BMCR, &bmcr);
2881                 if (bmcr & BMCR_ANENABLE) {
2882                         u32 phy1, phy2;
2883
2884                         /* Select shadow register 0x1f */
2885                         tg3_writephy(tp, 0x1c, 0x7c00);
2886                         tg3_readphy(tp, 0x1c, &phy1);
2887
2888                         /* Select expansion interrupt status register */
2889                         tg3_writephy(tp, 0x17, 0x0f01);
2890                         tg3_readphy(tp, 0x15, &phy2);
2891                         tg3_readphy(tp, 0x15, &phy2);
2892
2893                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2894                                 /* We have signal detect and not receiving
2895                                  * config code words, link is up by parallel
2896                                  * detection.
2897                                  */
2898
2899                                 bmcr &= ~BMCR_ANENABLE;
2900                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2901                                 tg3_writephy(tp, MII_BMCR, bmcr);
2902                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2903                         }
2904                 }
2905         }
2906         else if (netif_carrier_ok(tp->dev) &&
2907                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2908                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2909                 u32 phy2;
2910
2911                 /* Select expansion interrupt status register */
2912                 tg3_writephy(tp, 0x17, 0x0f01);
2913                 tg3_readphy(tp, 0x15, &phy2);
2914                 if (phy2 & 0x20) {
2915                         u32 bmcr;
2916
2917                         /* Config code words received, turn on autoneg. */
2918                         tg3_readphy(tp, MII_BMCR, &bmcr);
2919                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2920
2921                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2922
2923                 }
2924         }
2925 }
2926
2927 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2928 {
2929         int err;
2930
2931         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2932                 err = tg3_setup_fiber_phy(tp, force_reset);
2933         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2934                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2935         } else {
2936                 err = tg3_setup_copper_phy(tp, force_reset);
2937         }
2938
2939         if (tp->link_config.active_speed == SPEED_1000 &&
2940             tp->link_config.active_duplex == DUPLEX_HALF)
2941                 tw32(MAC_TX_LENGTHS,
2942                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2943                       (6 << TX_LENGTHS_IPG_SHIFT) |
2944                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2945         else
2946                 tw32(MAC_TX_LENGTHS,
2947                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2948                       (6 << TX_LENGTHS_IPG_SHIFT) |
2949                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2950
2951         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2952                 if (netif_carrier_ok(tp->dev)) {
2953                         tw32(HOSTCC_STAT_COAL_TICKS,
2954                              tp->coal.stats_block_coalesce_usecs);
2955                 } else {
2956                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2957                 }
2958         }
2959
2960         return err;
2961 }
2962
2963 /* This is called whenever we suspect that the system chipset is re-
2964  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2965  * is bogus tx completions. We try to recover by setting the
2966  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2967  * in the workqueue.
2968  */
2969 static void tg3_tx_recover(struct tg3 *tp)
2970 {
2971         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2972                tp->write32_tx_mbox == tg3_write_indirect_mbox);
2973
2974         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2975                "mapped I/O cycles to the network device, attempting to "
2976                "recover. Please report the problem to the driver maintainer "
2977                "and include system chipset information.\n", tp->dev->name);
2978
2979         spin_lock(&tp->lock);
2980         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2981         spin_unlock(&tp->lock);
2982 }
2983
2984 static inline u32 tg3_tx_avail(struct tg3 *tp)
2985 {
2986         smp_mb();
2987         return (tp->tx_pending -
2988                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2989 }
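
/* Illustrative sketch (added, not part of the driver): because
 * TG3_TX_RING_SIZE is a power of two, the free-running producer and
 * consumer indices above can simply be subtracted and masked to get the
 * number of descriptors in flight, even after the 32-bit counters wrap.
 * A hypothetical helper showing the same arithmetic for a generic ring:
 */
static inline u32 example_ring_in_flight(u32 prod, u32 cons, u32 ring_size)
{
        /* ring_size must be a power of two for the mask to be valid */
        return (prod - cons) & (ring_size - 1);
}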
2990
2991 /* Tigon3 never reports partial packet sends.  So we do not
2992  * need special logic to handle SKBs that have not had all
2993  * of their frags sent yet, like SunGEM does.
2994  */
2995 static void tg3_tx(struct tg3 *tp)
2996 {
2997         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2998         u32 sw_idx = tp->tx_cons;
2999
3000         while (sw_idx != hw_idx) {
3001                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3002                 struct sk_buff *skb = ri->skb;
3003                 int i, tx_bug = 0;
3004
3005                 if (unlikely(skb == NULL)) {
3006                         tg3_tx_recover(tp);
3007                         return;
3008                 }
3009
3010                 pci_unmap_single(tp->pdev,
3011                                  pci_unmap_addr(ri, mapping),
3012                                  skb_headlen(skb),
3013                                  PCI_DMA_TODEVICE);
3014
3015                 ri->skb = NULL;
3016
3017                 sw_idx = NEXT_TX(sw_idx);
3018
3019                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3020                         ri = &tp->tx_buffers[sw_idx];
3021                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3022                                 tx_bug = 1;
3023
3024                         pci_unmap_page(tp->pdev,
3025                                        pci_unmap_addr(ri, mapping),
3026                                        skb_shinfo(skb)->frags[i].size,
3027                                        PCI_DMA_TODEVICE);
3028
3029                         sw_idx = NEXT_TX(sw_idx);
3030                 }
3031
3032                 dev_kfree_skb(skb);
3033
3034                 if (unlikely(tx_bug)) {
3035                         tg3_tx_recover(tp);
3036                         return;
3037                 }
3038         }
3039
3040         tp->tx_cons = sw_idx;
3041
3042         /* Need to make the tx_cons update visible to tg3_start_xmit()
3043          * before checking for netif_queue_stopped().  Without the
3044          * memory barrier, there is a small possibility that tg3_start_xmit()
3045          * will miss it and cause the queue to be stopped forever.
3046          */
3047         smp_mb();
3048
3049         if (unlikely(netif_queue_stopped(tp->dev) &&
3050                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
3051                 netif_tx_lock(tp->dev);
3052                 if (netif_queue_stopped(tp->dev) &&
3053                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
3054                         netif_wake_queue(tp->dev);
3055                 netif_tx_unlock(tp->dev);
3056         }
3057 }
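
/* Note (added): the smp_mb() above pairs with the smp_mb() in
 * tg3_tx_avail().  The producer (tg3_start_xmit) stops the queue and then
 * re-checks ring space; the consumer (tg3_tx) updates tx_cons and then
 * re-checks whether the queue is stopped.  With both barriers in place, at
 * least one side is guaranteed to see the other's update, so the queue
 * cannot remain stopped forever.
 */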
3058
3059 /* Returns size of skb allocated or < 0 on error.
3060  *
3061  * We only need to fill in the address because the other members
3062  * of the RX descriptor are invariant, see tg3_init_rings.
3063  *
3064  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3065  * posting buffers we only dirty the first cache line of the RX
3066  * descriptor (containing the address).  Whereas for the RX status
3067  * buffers the cpu only reads the last cacheline of the RX descriptor
3068  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3069  */
3070 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3071                             int src_idx, u32 dest_idx_unmasked)
3072 {
3073         struct tg3_rx_buffer_desc *desc;
3074         struct ring_info *map, *src_map;
3075         struct sk_buff *skb;
3076         dma_addr_t mapping;
3077         int skb_size, dest_idx;
3078
3079         src_map = NULL;
3080         switch (opaque_key) {
3081         case RXD_OPAQUE_RING_STD:
3082                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3083                 desc = &tp->rx_std[dest_idx];
3084                 map = &tp->rx_std_buffers[dest_idx];
3085                 if (src_idx >= 0)
3086                         src_map = &tp->rx_std_buffers[src_idx];
3087                 skb_size = tp->rx_pkt_buf_sz;
3088                 break;
3089
3090         case RXD_OPAQUE_RING_JUMBO:
3091                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3092                 desc = &tp->rx_jumbo[dest_idx];
3093                 map = &tp->rx_jumbo_buffers[dest_idx];
3094                 if (src_idx >= 0)
3095                         src_map = &tp->rx_jumbo_buffers[src_idx];
3096                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3097                 break;
3098
3099         default:
3100                 return -EINVAL;
3101         }
3102
3103         /* Do not overwrite any of the map or rp information
3104          * until we are sure we can commit to a new buffer.
3105          *
3106          * Callers depend upon this behavior and assume that
3107          * we leave everything unchanged if we fail.
3108          */
3109         skb = netdev_alloc_skb(tp->dev, skb_size);
3110         if (skb == NULL)
3111                 return -ENOMEM;
3112
3113         skb_reserve(skb, tp->rx_offset);
3114
3115         mapping = pci_map_single(tp->pdev, skb->data,
3116                                  skb_size - tp->rx_offset,
3117                                  PCI_DMA_FROMDEVICE);
3118
3119         map->skb = skb;
3120         pci_unmap_addr_set(map, mapping, mapping);
3121
3122         if (src_map != NULL)
3123                 src_map->skb = NULL;
3124
3125         desc->addr_hi = ((u64)mapping >> 32);
3126         desc->addr_lo = ((u64)mapping & 0xffffffff);
3127
3128         return skb_size;
3129 }
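
/* Illustrative sketch (added, not part of the driver): the descriptor
 * address fields written above are simply the two 32-bit halves of the
 * 64-bit DMA address.  The same split as a hypothetical helper:
 */
static inline void example_split_dma_addr(u64 addr, u32 *hi, u32 *lo)
{
        *hi = (u32)(addr >> 32);
        *lo = (u32)(addr & 0xffffffff);
}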
3130
3131 /* We only need to move over in the address because the other
3132  * members of the RX descriptor are invariant.  See notes above
3133  * tg3_alloc_rx_skb for full details.
3134  */
3135 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3136                            int src_idx, u32 dest_idx_unmasked)
3137 {
3138         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3139         struct ring_info *src_map, *dest_map;
3140         int dest_idx;
3141
3142         switch (opaque_key) {
3143         case RXD_OPAQUE_RING_STD:
3144                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3145                 dest_desc = &tp->rx_std[dest_idx];
3146                 dest_map = &tp->rx_std_buffers[dest_idx];
3147                 src_desc = &tp->rx_std[src_idx];
3148                 src_map = &tp->rx_std_buffers[src_idx];
3149                 break;
3150
3151         case RXD_OPAQUE_RING_JUMBO:
3152                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3153                 dest_desc = &tp->rx_jumbo[dest_idx];
3154                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3155                 src_desc = &tp->rx_jumbo[src_idx];
3156                 src_map = &tp->rx_jumbo_buffers[src_idx];
3157                 break;
3158
3159         default:
3160                 return;
3161         }
3162
3163         dest_map->skb = src_map->skb;
3164         pci_unmap_addr_set(dest_map, mapping,
3165                            pci_unmap_addr(src_map, mapping));
3166         dest_desc->addr_hi = src_desc->addr_hi;
3167         dest_desc->addr_lo = src_desc->addr_lo;
3168
3169         src_map->skb = NULL;
3170 }
3171
3172 #if TG3_VLAN_TAG_USED
3173 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3174 {
3175         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3176 }
3177 #endif
3178
3179 /* The RX ring scheme is composed of multiple rings which post fresh
3180  * buffers to the chip, and one special ring the chip uses to report
3181  * status back to the host.
3182  *
3183  * The special ring reports the status of received packets to the
3184  * host.  The chip does not write into the original descriptor the
3185  * RX buffer was obtained from.  The chip simply takes the original
3186  * descriptor as provided by the host, updates the status and length
3187  * field, then writes this into the next status ring entry.
3188  *
3189  * Each ring the host uses to post buffers to the chip is described
3190  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3191  * it is first placed into on-chip RAM.  Once the packet's length is
3192  * known, the chip walks down the TG3_BDINFO entries to select the ring.
3193  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3194  * whose MAXLEN is large enough for the new packet is chosen.
3195  *
3196  * The "separate ring for rx status" scheme may sound queer, but it makes
3197  * sense from a cache coherency perspective.  If only the host writes
3198  * to the buffer post rings, and only the chip writes to the rx status
3199  * rings, then cache lines never move beyond shared-modified state.
3200  * If both the host and chip were to write into the same ring, cache line
3201  * eviction could occur since both entities want it in an exclusive state.
3202  */
3203 static int tg3_rx(struct tg3 *tp, int budget)
3204 {
3205         u32 work_mask, rx_std_posted = 0;
3206         u32 sw_idx = tp->rx_rcb_ptr;
3207         u16 hw_idx;
3208         int received;
3209
3210         hw_idx = tp->hw_status->idx[0].rx_producer;
3211         /*
3212          * We need to order the read of hw_idx and the read of
3213          * the opaque cookie.
3214          */
3215         rmb();
3216         work_mask = 0;
3217         received = 0;
3218         while (sw_idx != hw_idx && budget > 0) {
3219                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3220                 unsigned int len;
3221                 struct sk_buff *skb;
3222                 dma_addr_t dma_addr;
3223                 u32 opaque_key, desc_idx, *post_ptr;
3224
3225                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3226                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3227                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3228                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3229                                                   mapping);
3230                         skb = tp->rx_std_buffers[desc_idx].skb;
3231                         post_ptr = &tp->rx_std_ptr;
3232                         rx_std_posted++;
3233                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3234                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3235                                                   mapping);
3236                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3237                         post_ptr = &tp->rx_jumbo_ptr;
3238                 }
3239                 else {
3240                         goto next_pkt_nopost;
3241                 }
3242
3243                 work_mask |= opaque_key;
3244
3245                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3246                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3247                 drop_it:
3248                         tg3_recycle_rx(tp, opaque_key,
3249                                        desc_idx, *post_ptr);
3250                 drop_it_no_recycle:
3251                         /* Other statistics kept track of by card. */
3252                         tp->net_stats.rx_dropped++;
3253                         goto next_pkt;
3254                 }
3255
3256                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3257
3258                 if (len > RX_COPY_THRESHOLD
3259                         && tp->rx_offset == 2
3260                         /* rx_offset != 2 iff this is a 5701 card running
3261                          * in PCI-X mode [see tg3_get_invariants()] */
3262                 ) {
3263                         int skb_size;
3264
3265                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3266                                                     desc_idx, *post_ptr);
3267                         if (skb_size < 0)
3268                                 goto drop_it;
3269
3270                         pci_unmap_single(tp->pdev, dma_addr,
3271                                          skb_size - tp->rx_offset,
3272                                          PCI_DMA_FROMDEVICE);
3273
3274                         skb_put(skb, len);
3275                 } else {
3276                         struct sk_buff *copy_skb;
3277
3278                         tg3_recycle_rx(tp, opaque_key,
3279                                        desc_idx, *post_ptr);
3280
3281                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3282                         if (copy_skb == NULL)
3283                                 goto drop_it_no_recycle;
3284
3285                         skb_reserve(copy_skb, 2);
3286                         skb_put(copy_skb, len);
3287                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3288                         memcpy(copy_skb->data, skb->data, len);
3289                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3290
3291                         /* We'll reuse the original ring buffer. */
3292                         skb = copy_skb;
3293                 }
3294
3295                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3296                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3297                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3298                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3299                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3300                 else
3301                         skb->ip_summed = CHECKSUM_NONE;
3302
3303                 skb->protocol = eth_type_trans(skb, tp->dev);
3304 #if TG3_VLAN_TAG_USED
3305                 if (tp->vlgrp != NULL &&
3306                     desc->type_flags & RXD_FLAG_VLAN) {
3307                         tg3_vlan_rx(tp, skb,
3308                                     desc->err_vlan & RXD_VLAN_MASK);
3309                 } else
3310 #endif
3311                         netif_receive_skb(skb);
3312
3313                 tp->dev->last_rx = jiffies;
3314                 received++;
3315                 budget--;
3316
3317 next_pkt:
3318                 (*post_ptr)++;
3319
3320                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3321                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3322
3323                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3324                                      TG3_64BIT_REG_LOW, idx);
3325                         work_mask &= ~RXD_OPAQUE_RING_STD;
3326                         rx_std_posted = 0;
3327                 }
3328 next_pkt_nopost:
3329                 sw_idx++;
3330                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3331
3332                 /* Refresh hw_idx to see if there is new work */
3333                 if (sw_idx == hw_idx) {
3334                         hw_idx = tp->hw_status->idx[0].rx_producer;
3335                         rmb();
3336                 }
3337         }
3338
3339         /* ACK the status ring. */
3340         tp->rx_rcb_ptr = sw_idx;
3341         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3342
3343         /* Refill RX ring(s). */
3344         if (work_mask & RXD_OPAQUE_RING_STD) {
3345                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3346                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3347                              sw_idx);
3348         }
3349         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3350                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3351                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3352                              sw_idx);
3353         }
3354         mmiowb();
3355
3356         return received;
3357 }
3358
3359 static int tg3_poll(struct net_device *netdev, int *budget)
3360 {
3361         struct tg3 *tp = netdev_priv(netdev);
3362         struct tg3_hw_status *sblk = tp->hw_status;
3363         int done;
3364
3365         /* handle link change and other phy events */
3366         if (!(tp->tg3_flags &
3367               (TG3_FLAG_USE_LINKCHG_REG |
3368                TG3_FLAG_POLL_SERDES))) {
3369                 if (sblk->status & SD_STATUS_LINK_CHG) {
3370                         sblk->status = SD_STATUS_UPDATED |
3371                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3372                         spin_lock(&tp->lock);
3373                         tg3_setup_phy(tp, 0);
3374                         spin_unlock(&tp->lock);
3375                 }
3376         }
3377
3378         /* run TX completion thread */
3379         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3380                 tg3_tx(tp);
3381                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3382                         netif_rx_complete(netdev);
3383                         schedule_work(&tp->reset_task);
3384                         return 0;
3385                 }
3386         }
3387
3388         /* run RX thread, within the bounds set by NAPI.
3389          * All RX "locking" is done by ensuring outside
3390          * code synchronizes with dev->poll()
3391          */
3392         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3393                 int orig_budget = *budget;
3394                 int work_done;
3395
3396                 if (orig_budget > netdev->quota)
3397                         orig_budget = netdev->quota;
3398
3399                 work_done = tg3_rx(tp, orig_budget);
3400
3401                 *budget -= work_done;
3402                 netdev->quota -= work_done;
3403         }
3404
3405         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3406                 tp->last_tag = sblk->status_tag;
3407                 rmb();
3408         } else
3409                 sblk->status &= ~SD_STATUS_UPDATED;
3410
3411         /* if no more work, tell net stack and NIC we're done */
3412         done = !tg3_has_work(tp);
3413         if (done) {
3414                 netif_rx_complete(netdev);
3415                 tg3_restart_ints(tp);
3416         }
3417
3418         return (done ? 0 : 1);
3419 }
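
/* Note (added): tg3_poll() follows the pre-2.6.24 NAPI contract visible
 * above: it consumes at most min(*budget, dev->quota) RX packets, decrements
 * both counters by the work done, and returns 0 (after netif_rx_complete())
 * only when no work remains; returning 1 asks the core to poll again.
 */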
3420
3421 static void tg3_irq_quiesce(struct tg3 *tp)
3422 {
3423         BUG_ON(tp->irq_sync);
3424
3425         tp->irq_sync = 1;
3426         smp_mb();
3427
3428         synchronize_irq(tp->pdev->irq);
3429 }
3430
3431 static inline int tg3_irq_sync(struct tg3 *tp)
3432 {
3433         return tp->irq_sync;
3434 }
3435
3436 /* Fully shut down all tg3 driver activity elsewhere in the system.
3437  * If irq_sync is non-zero, the IRQ handler is quiesced (synchronized
3438  * with) as well.  Most of the time this is only necessary when
3439  * shutting down the device.
3440  */
3441 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3442 {
3443         if (irq_sync)
3444                 tg3_irq_quiesce(tp);
3445         spin_lock_bh(&tp->lock);
3446 }
3447
3448 static inline void tg3_full_unlock(struct tg3 *tp)
3449 {
3450         spin_unlock_bh(&tp->lock);
3451 }
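
/* Illustrative usage (added, not part of the driver): callers that are
 * about to reconfigure the hardware follow the pattern used by
 * tg3_reset_task() and tg3_change_mtu() below:
 *
 *      tg3_netif_stop(tp);
 *      tg3_full_lock(tp, 1);   (irq_sync == 1 also quiesces the IRQ handler)
 *      ... halt and reprogram the chip ...
 *      tg3_netif_start(tp);
 *      tg3_full_unlock(tp);
 */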
3452
3453 /* One-shot MSI handler - the chip automatically disables the interrupt
3454  * after sending the MSI, so the driver doesn't have to do it.
3455  */
3456 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3457 {
3458         struct net_device *dev = dev_id;
3459         struct tg3 *tp = netdev_priv(dev);
3460
3461         prefetch(tp->hw_status);
3462         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3463
3464         if (likely(!tg3_irq_sync(tp)))
3465                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3466
3467         return IRQ_HANDLED;
3468 }
3469
3470 /* MSI ISR - No need to check for interrupt sharing and no need to
3471  * flush status block and interrupt mailbox. PCI ordering rules
3472  * guarantee that MSI will arrive after the status block.
3473  */
3474 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3475 {
3476         struct net_device *dev = dev_id;
3477         struct tg3 *tp = netdev_priv(dev);
3478
3479         prefetch(tp->hw_status);
3480         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3481         /*
3482          * Writing any value to intr-mbox-0 clears PCI INTA# and
3483          * chip-internal interrupt pending events.
3484          * Writing non-zero to intr-mbox-0 additionally tells the
3485          * NIC to stop sending us irqs, engaging "in-intr-handler"
3486          * event coalescing.
3487          */
3488         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3489         if (likely(!tg3_irq_sync(tp)))
3490                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3491
3492         return IRQ_RETVAL(1);
3493 }
3494
3495 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3496 {
3497         struct net_device *dev = dev_id;
3498         struct tg3 *tp = netdev_priv(dev);
3499         struct tg3_hw_status *sblk = tp->hw_status;
3500         unsigned int handled = 1;
3501
3502         /* In INTx mode, the interrupt can arrive at the CPU before the
3503          * status block posted prior to the interrupt has reached host
3504          * memory.  Reading the PCI State register will confirm whether
3505          * the interrupt is ours and will flush the status block.
3506          */
3507         if ((sblk->status & SD_STATUS_UPDATED) ||
3508             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3509                 /*
3510                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3511                  * chip-internal interrupt pending events.
3512                  * Writing non-zero to intr-mbox-0 additionally tells the
3513                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3514                  * event coalescing.
3515                  */
3516                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3517                              0x00000001);
3518                 if (tg3_irq_sync(tp))
3519                         goto out;
3520                 sblk->status &= ~SD_STATUS_UPDATED;
3521                 if (likely(tg3_has_work(tp))) {
3522                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3523                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3524                 } else {
3525                         /* No work, shared interrupt perhaps?  re-enable
3526                          * interrupts, and flush that PCI write
3527                          */
3528                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3529                                 0x00000000);
3530                 }
3531         } else {        /* shared interrupt */
3532                 handled = 0;
3533         }
3534 out:
3535         return IRQ_RETVAL(handled);
3536 }
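
/* Note (added): the interrupt mailbox convention used by the handlers above
 * and below is: writing a non-zero value to MAILBOX_INTERRUPT_0 acks the
 * interrupt and keeps further irqs masked while the NAPI poll runs, while
 * writing zero (the "no work" branch above) re-enables them.
 */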
3537
3538 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3539 {
3540         struct net_device *dev = dev_id;
3541         struct tg3 *tp = netdev_priv(dev);
3542         struct tg3_hw_status *sblk = tp->hw_status;
3543         unsigned int handled = 1;
3544
3545         /* In INTx mode, the interrupt can arrive at the CPU before the
3546          * status block posted prior to the interrupt has reached host
3547          * memory.  Reading the PCI State register will confirm whether
3548          * the interrupt is ours and will flush the status block.
3549          */
3550         if ((sblk->status_tag != tp->last_tag) ||
3551             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3552                 /*
3553                  * writing any value to intr-mbox-0 clears PCI INTA# and
3554                  * chip-internal interrupt pending events.
3555                  * writing non-zero to intr-mbox-0 additionally tells the
3556                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3557                  * event coalescing.
3558                  */
3559                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3560                              0x00000001);
3561                 if (tg3_irq_sync(tp))
3562                         goto out;
3563                 if (netif_rx_schedule_prep(dev)) {
3564                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3565                         /* Update last_tag to mark that this status has been
3566                          * seen. Because interrupt may be shared, we may be
3567                          * racing with tg3_poll(), so only update last_tag
3568                          * if tg3_poll() is not scheduled.
3569                          */
3570                         tp->last_tag = sblk->status_tag;
3571                         __netif_rx_schedule(dev);
3572                 }
3573         } else {        /* shared interrupt */
3574                 handled = 0;
3575         }
3576 out:
3577         return IRQ_RETVAL(handled);
3578 }
3579
3580 /* ISR for interrupt test */
3581 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3582                 struct pt_regs *regs)
3583 {
3584         struct net_device *dev = dev_id;
3585         struct tg3 *tp = netdev_priv(dev);
3586         struct tg3_hw_status *sblk = tp->hw_status;
3587
3588         if ((sblk->status & SD_STATUS_UPDATED) ||
3589             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3590                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3591                              0x00000001);
3592                 return IRQ_RETVAL(1);
3593         }
3594         return IRQ_RETVAL(0);
3595 }
3596
3597 static int tg3_init_hw(struct tg3 *, int);
3598 static int tg3_halt(struct tg3 *, int, int);
3599
3600 /* Restart hardware after configuration changes, self-test, etc.
3601  * Invoked with tp->lock held.
3602  */
3603 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3604 {
3605         int err;
3606
3607         err = tg3_init_hw(tp, reset_phy);
3608         if (err) {
3609                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3610                        "aborting.\n", tp->dev->name);
3611                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3612                 tg3_full_unlock(tp);
3613                 del_timer_sync(&tp->timer);
3614                 tp->irq_sync = 0;
3615                 netif_poll_enable(tp->dev);
3616                 dev_close(tp->dev);
3617                 tg3_full_lock(tp, 0);
3618         }
3619         return err;
3620 }
3621
3622 #ifdef CONFIG_NET_POLL_CONTROLLER
3623 static void tg3_poll_controller(struct net_device *dev)
3624 {
3625         struct tg3 *tp = netdev_priv(dev);
3626
3627         tg3_interrupt(tp->pdev->irq, dev, NULL);
3628 }
3629 #endif
3630
3631 static void tg3_reset_task(void *_data)
3632 {
3633         struct tg3 *tp = _data;
3634         unsigned int restart_timer;
3635
3636         tg3_full_lock(tp, 0);
3637         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3638
3639         if (!netif_running(tp->dev)) {
3640                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3641                 tg3_full_unlock(tp);
3642                 return;
3643         }
3644
3645         tg3_full_unlock(tp);
3646
3647         tg3_netif_stop(tp);
3648
3649         tg3_full_lock(tp, 1);
3650
3651         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3652         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3653
3654         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3655                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3656                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3657                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3658                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3659         }
3660
3661         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3662         if (tg3_init_hw(tp, 1))
3663                 goto out;
3664
3665         tg3_netif_start(tp);
3666
3667         if (restart_timer)
3668                 mod_timer(&tp->timer, jiffies + 1);
3669
3670 out:
3671         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3672
3673         tg3_full_unlock(tp);
3674 }
3675
3676 static void tg3_tx_timeout(struct net_device *dev)
3677 {
3678         struct tg3 *tp = netdev_priv(dev);
3679
3680         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3681                dev->name);
3682
3683         schedule_work(&tp->reset_task);
3684 }
3685
3686 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3687 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3688 {
3689         u32 base = (u32) mapping & 0xffffffff;
3690
3691         return ((base > 0xffffdcc0) &&
3692                 (base + len + 8 < base));
3693 }
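
/* Note (added): the test above detects a mapping whose low 32 bits wrap past
 * a 4GB boundary.  0xffffdcc0 is 4GB - 9024, so only a base address within
 * roughly one maximum-sized frame of a boundary can possibly wrap; the
 * second condition (base + len + 8 < base) is the actual unsigned-overflow
 * check.  Worked example: base = 0xffffff00 and len = 0x200 give
 * base + len + 8 = 0x108 after the 32-bit wrap, which is below base, so the
 * buffer straddles the boundary.
 */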
3694
3695 /* Test for DMA addresses > 40-bit */
3696 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3697                                           int len)
3698 {
3699 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3700         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3701                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3702         return 0;
3703 #else
3704         return 0;
3705 #endif
3706 }
3707
3708 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3709
3710 /* Work around the 4GB and 40-bit hardware DMA bugs. */
3711 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3712                                        u32 last_plus_one, u32 *start,
3713                                        u32 base_flags, u32 mss)
3714 {
3715         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3716         dma_addr_t new_addr = 0;
3717         u32 entry = *start;
3718         int i, ret = 0;
3719
3720         if (!new_skb) {
3721                 ret = -1;
3722         } else {
3723                 /* New SKB is guaranteed to be linear. */
3724                 entry = *start;
3725                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3726                                           PCI_DMA_TODEVICE);
3727                 /* Make sure new skb does not cross any 4G boundaries.
3728                  * Drop the packet if it does.
3729                  */
3730                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3731                         ret = -1;
3732                         dev_kfree_skb(new_skb);
3733                         new_skb = NULL;
3734                 } else {
3735                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3736                                     base_flags, 1 | (mss << 1));
3737                         *start = NEXT_TX(entry);
3738                 }
3739         }
3740
3741         /* Now clean up the sw ring entries. */
3742         i = 0;
3743         while (entry != last_plus_one) {
3744                 int len;
3745
3746                 if (i == 0)
3747                         len = skb_headlen(skb);
3748                 else
3749                         len = skb_shinfo(skb)->frags[i-1].size;
3750                 pci_unmap_single(tp->pdev,
3751                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3752                                  len, PCI_DMA_TODEVICE);
3753                 if (i == 0) {
3754                         tp->tx_buffers[entry].skb = new_skb;
3755                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3756                 } else {
3757                         tp->tx_buffers[entry].skb = NULL;
3758                 }
3759                 entry = NEXT_TX(entry);
3760                 i++;
3761         }
3762
3763         dev_kfree_skb(skb);
3764
3765         return ret;
3766 }
3767
3768 static void tg3_set_txd(struct tg3 *tp, int entry,
3769                         dma_addr_t mapping, int len, u32 flags,
3770                         u32 mss_and_is_end)
3771 {
3772         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3773         int is_end = (mss_and_is_end & 0x1);
3774         u32 mss = (mss_and_is_end >> 1);
3775         u32 vlan_tag = 0;
3776
3777         if (is_end)
3778                 flags |= TXD_FLAG_END;
3779         if (flags & TXD_FLAG_VLAN) {
3780                 vlan_tag = flags >> 16;
3781                 flags &= 0xffff;
3782         }
3783         vlan_tag |= (mss << TXD_MSS_SHIFT);
3784
3785         txd->addr_hi = ((u64) mapping >> 32);
3786         txd->addr_lo = ((u64) mapping & 0xffffffff);
3787         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3788         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3789 }
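
/* Illustrative sketch (added, not part of the driver): callers encode the
 * mss_and_is_end argument as (is_last_fragment) | (mss << 1), e.g.
 * "(i == last) | (mss << 1)" in the xmit paths below.  A hypothetical
 * helper making the packing explicit:
 */
static inline u32 example_pack_mss_and_end(u32 mss, int is_last_frag)
{
        /* bit 0: end-of-packet flag, remaining bits: TSO MSS */
        return (is_last_frag ? 1 : 0) | (mss << 1);
}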
3790
3791 /* hard_start_xmit for devices that don't have any bugs and
3792  * support TG3_FLG2_HW_TSO_2 only.
3793  */
3794 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3795 {
3796         struct tg3 *tp = netdev_priv(dev);
3797         dma_addr_t mapping;
3798         u32 len, entry, base_flags, mss;
3799
3800         len = skb_headlen(skb);
3801
3802         /* We are running in BH disabled context with netif_tx_lock
3803          * and TX reclaim runs via tp->poll inside of a software
3804          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3805          * no IRQ context deadlocks to worry about either.  Rejoice!
3806          */
3807         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3808                 if (!netif_queue_stopped(dev)) {
3809                         netif_stop_queue(dev);
3810
3811                         /* This is a hard error, log it. */
3812                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3813                                "queue awake!\n", dev->name);
3814                 }
3815                 return NETDEV_TX_BUSY;
3816         }
3817
3818         entry = tp->tx_prod;
3819         base_flags = 0;
3820 #if TG3_TSO_SUPPORT != 0
3821         mss = 0;
3822         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3823             (mss = skb_shinfo(skb)->gso_size) != 0) {
3824                 int tcp_opt_len, ip_tcp_len;
3825
3826                 if (skb_header_cloned(skb) &&
3827                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3828                         dev_kfree_skb(skb);
3829                         goto out_unlock;
3830                 }
3831
3832                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3833                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3834                 else {
3835                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3836                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3837                                      sizeof(struct tcphdr);
3838
3839                         skb->nh.iph->check = 0;
3840                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3841                                                      tcp_opt_len);
3842                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3843                 }
3844
3845                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3846                                TXD_FLAG_CPU_POST_DMA);
3847
3848                 skb->h.th->check = 0;
3849
3850         }
3851         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3852                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3853 #else
3854         mss = 0;
3855         if (skb->ip_summed == CHECKSUM_PARTIAL)
3856                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3857 #endif
3858 #if TG3_VLAN_TAG_USED
3859         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3860                 base_flags |= (TXD_FLAG_VLAN |
3861                                (vlan_tx_tag_get(skb) << 16));
3862 #endif
3863
3864         /* Queue skb data, a.k.a. the main skb fragment. */
3865         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3866
3867         tp->tx_buffers[entry].skb = skb;
3868         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3869
3870         tg3_set_txd(tp, entry, mapping, len, base_flags,
3871                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3872
3873         entry = NEXT_TX(entry);
3874
3875         /* Now loop through additional data fragments, and queue them. */
3876         if (skb_shinfo(skb)->nr_frags > 0) {
3877                 unsigned int i, last;
3878
3879                 last = skb_shinfo(skb)->nr_frags - 1;
3880                 for (i = 0; i <= last; i++) {
3881                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3882
3883                         len = frag->size;
3884                         mapping = pci_map_page(tp->pdev,
3885                                                frag->page,
3886                                                frag->page_offset,
3887                                                len, PCI_DMA_TODEVICE);
3888
3889                         tp->tx_buffers[entry].skb = NULL;
3890                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3891
3892                         tg3_set_txd(tp, entry, mapping, len,
3893                                     base_flags, (i == last) | (mss << 1));
3894
3895                         entry = NEXT_TX(entry);
3896                 }
3897         }
3898
3899         /* Packets are ready, update Tx producer idx local and on card. */
3900         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3901
3902         tp->tx_prod = entry;
3903         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3904                 netif_stop_queue(dev);
3905                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
3906                         netif_wake_queue(tp->dev);
3907         }
3908
3909 out_unlock:
3910         mmiowb();
3911
3912         dev->trans_start = jiffies;
3913
3914         return NETDEV_TX_OK;
3915 }
3916
3917 #if TG3_TSO_SUPPORT != 0
3918 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3919
3920 /* Use GSO to work around a rare TSO bug that may be triggered when the
3921  * TSO header is greater than 80 bytes.
3922  */
3923 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3924 {
3925         struct sk_buff *segs, *nskb;
3926
3927         /* Estimate the number of fragments in the worst case */
3928         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3929                 netif_stop_queue(tp->dev);
3930                 return NETDEV_TX_BUSY;
3931         }
3932
3933         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3934         if (unlikely(IS_ERR(segs)))
3935                 goto tg3_tso_bug_end;
3936
3937         do {
3938                 nskb = segs;
3939                 segs = segs->next;
3940                 nskb->next = NULL;
3941                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3942         } while (segs);
3943
3944 tg3_tso_bug_end:
3945         dev_kfree_skb(skb);
3946
3947         return NETDEV_TX_OK;
3948 }
3949 #endif
3950
3951 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3952  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3953  */
3954 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3955 {
3956         struct tg3 *tp = netdev_priv(dev);
3957         dma_addr_t mapping;
3958         u32 len, entry, base_flags, mss;
3959         int would_hit_hwbug;
3960
3961         len = skb_headlen(skb);
3962
3963         /* We are running in BH disabled context with netif_tx_lock
3964          * and TX reclaim runs via tp->poll inside of a software
3965          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3966          * no IRQ context deadlocks to worry about either.  Rejoice!
3967          */
3968         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3969                 if (!netif_queue_stopped(dev)) {
3970                         netif_stop_queue(dev);
3971
3972                         /* This is a hard error, log it. */
3973                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3974                                "queue awake!\n", dev->name);
3975                 }
3976                 return NETDEV_TX_BUSY;
3977         }
3978
3979         entry = tp->tx_prod;
3980         base_flags = 0;
3981         if (skb->ip_summed == CHECKSUM_PARTIAL)
3982                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3983 #if TG3_TSO_SUPPORT != 0
3984         mss = 0;
3985         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3986             (mss = skb_shinfo(skb)->gso_size) != 0) {
3987                 int tcp_opt_len, ip_tcp_len, hdr_len;
3988
3989                 if (skb_header_cloned(skb) &&
3990                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3991                         dev_kfree_skb(skb);
3992                         goto out_unlock;
3993                 }
3994
3995                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3996                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3997
3998                 hdr_len = ip_tcp_len + tcp_opt_len;
3999                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4000                              (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
4001                         return (tg3_tso_bug(tp, skb));
4002
4003                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4004                                TXD_FLAG_CPU_POST_DMA);
4005
4006                 skb->nh.iph->check = 0;
4007                 skb->nh.iph->tot_len = htons(mss + hdr_len);
4008                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4009                         skb->h.th->check = 0;
4010                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4011                 }
4012                 else {
4013                         skb->h.th->check =
4014                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4015                                                    skb->nh.iph->daddr,
4016                                                    0, IPPROTO_TCP, 0);
4017                 }
4018
4019                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4020                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4021                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4022                                 int tsflags;
4023
4024                                 tsflags = ((skb->nh.iph->ihl - 5) +
4025                                            (tcp_opt_len >> 2));
4026                                 mss |= (tsflags << 11);
4027                         }
4028                 } else {
4029                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4030                                 int tsflags;
4031
4032                                 tsflags = ((skb->nh.iph->ihl - 5) +
4033                                            (tcp_opt_len >> 2));
4034                                 base_flags |= tsflags << 12;
4035                         }
4036                 }
4037         }
4038 #else
4039         mss = 0;
4040 #endif
4041 #if TG3_VLAN_TAG_USED
4042         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4043                 base_flags |= (TXD_FLAG_VLAN |
4044                                (vlan_tx_tag_get(skb) << 16));
4045 #endif
4046
4047         /* Queue skb data, a.k.a. the main skb fragment. */
4048         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4049
4050         tp->tx_buffers[entry].skb = skb;
4051         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4052
4053         would_hit_hwbug = 0;
4054
4055         if (tg3_4g_overflow_test(mapping, len))
4056                 would_hit_hwbug = 1;
4057
4058         tg3_set_txd(tp, entry, mapping, len, base_flags,
4059                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4060
4061         entry = NEXT_TX(entry);
4062
4063         /* Now loop through additional data fragments, and queue them. */
4064         if (skb_shinfo(skb)->nr_frags > 0) {
4065                 unsigned int i, last;
4066
4067                 last = skb_shinfo(skb)->nr_frags - 1;
4068                 for (i = 0; i <= last; i++) {
4069                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4070
4071                         len = frag->size;
4072                         mapping = pci_map_page(tp->pdev,
4073                                                frag->page,
4074                                                frag->page_offset,
4075                                                len, PCI_DMA_TODEVICE);
4076
4077                         tp->tx_buffers[entry].skb = NULL;
4078                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4079
4080                         if (tg3_4g_overflow_test(mapping, len))
4081                                 would_hit_hwbug = 1;
4082
4083                         if (tg3_40bit_overflow_test(tp, mapping, len))
4084                                 would_hit_hwbug = 1;
4085
4086                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4087                                 tg3_set_txd(tp, entry, mapping, len,
4088                                             base_flags, (i == last)|(mss << 1));
4089                         else
4090                                 tg3_set_txd(tp, entry, mapping, len,
4091                                             base_flags, (i == last));
4092
4093                         entry = NEXT_TX(entry);
4094                 }
4095         }
4096
4097         if (would_hit_hwbug) {
4098                 u32 last_plus_one = entry;
4099                 u32 start;
4100
4101                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4102                 start &= (TG3_TX_RING_SIZE - 1);
4103
4104                 /* If the workaround fails due to memory/mapping
4105                  * failure, silently drop this packet.
4106                  */
4107                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4108                                                 &start, base_flags, mss))
4109                         goto out_unlock;
4110
4111                 entry = start;
4112         }
4113
4114         /* Packets are ready, update Tx producer idx local and on card. */
4115         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4116
4117         tp->tx_prod = entry;
4118         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4119                 netif_stop_queue(dev);
4120                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
4121                         netif_wake_queue(tp->dev);
4122         }
4123
4124 out_unlock:
4125         mmiowb();
4126
4127         dev->trans_start = jiffies;
4128
4129         return NETDEV_TX_OK;
4130 }
4131
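/* Editorial sketch (hypothetical helper, not used by the driver): the
 * tg3_4g_overflow_test()/tg3_40bit_overflow_test() calls in the transmit
 * path above guard against DMA mappings the DMA engine cannot handle.
 * Assuming the 4GB case simply means "the buffer's bus addresses wrap the
 * low 32 bits", the core of such a check could look like this:
 */
static inline int tg3_example_spans_4g_boundary(dma_addr_t mapping, int len)
{
	u32 base = (u32) (mapping & 0xffffffff);

	/* If adding len wraps the low 32 bits, the buffer straddles a
	 * 4GB boundary and the hwbug workaround path above is needed.
	 */
	return base + len < base;
}
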
4132 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4133                                int new_mtu)
4134 {
4135         dev->mtu = new_mtu;
4136
4137         if (new_mtu > ETH_DATA_LEN) {
4138                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4139                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4140                         ethtool_op_set_tso(dev, 0);
4141                 }
4142                 else
4143                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4144         } else {
4145                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4146                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4147                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4148         }
4149 }
4150
4151 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4152 {
4153         struct tg3 *tp = netdev_priv(dev);
4154         int err;
4155
4156         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4157                 return -EINVAL;
4158
4159         if (!netif_running(dev)) {
4160                 /* We'll just catch it later when the
4161                  * device is brought up.
4162                  */
4163                 tg3_set_mtu(dev, tp, new_mtu);
4164                 return 0;
4165         }
4166
4167         tg3_netif_stop(tp);
4168
4169         tg3_full_lock(tp, 1);
4170
4171         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4172
4173         tg3_set_mtu(dev, tp, new_mtu);
4174
4175         err = tg3_restart_hw(tp, 0);
4176
4177         if (!err)
4178                 tg3_netif_start(tp);
4179
4180         tg3_full_unlock(tp);
4181
4182         return err;
4183 }
4184
4185 /* Free up pending packets in all rx/tx rings.
4186  *
4187  * The chip has been shut down and the driver detached from
4188  * the networking stack, so no interrupts or new tx packets will
4189  * end up in the driver.  tp->{tx,}lock is not held and we are not
4190  * in an interrupt context and thus may sleep.
4191  */
4192 static void tg3_free_rings(struct tg3 *tp)
4193 {
4194         struct ring_info *rxp;
4195         int i;
4196
4197         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4198                 rxp = &tp->rx_std_buffers[i];
4199
4200                 if (rxp->skb == NULL)
4201                         continue;
4202                 pci_unmap_single(tp->pdev,
4203                                  pci_unmap_addr(rxp, mapping),
4204                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4205                                  PCI_DMA_FROMDEVICE);
4206                 dev_kfree_skb_any(rxp->skb);
4207                 rxp->skb = NULL;
4208         }
4209
4210         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4211                 rxp = &tp->rx_jumbo_buffers[i];
4212
4213                 if (rxp->skb == NULL)
4214                         continue;
4215                 pci_unmap_single(tp->pdev,
4216                                  pci_unmap_addr(rxp, mapping),
4217                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4218                                  PCI_DMA_FROMDEVICE);
4219                 dev_kfree_skb_any(rxp->skb);
4220                 rxp->skb = NULL;
4221         }
4222
4223         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4224                 struct tx_ring_info *txp;
4225                 struct sk_buff *skb;
4226                 int j;
4227
4228                 txp = &tp->tx_buffers[i];
4229                 skb = txp->skb;
4230
4231                 if (skb == NULL) {
4232                         i++;
4233                         continue;
4234                 }
4235
4236                 pci_unmap_single(tp->pdev,
4237                                  pci_unmap_addr(txp, mapping),
4238                                  skb_headlen(skb),
4239                                  PCI_DMA_TODEVICE);
4240                 txp->skb = NULL;
4241
4242                 i++;
4243
4244                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4245                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4246                         pci_unmap_page(tp->pdev,
4247                                        pci_unmap_addr(txp, mapping),
4248                                        skb_shinfo(skb)->frags[j].size,
4249                                        PCI_DMA_TODEVICE);
4250                         i++;
4251                 }
4252
4253                 dev_kfree_skb_any(skb);
4254         }
4255 }
4256
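/* Editorial sketch (hypothetical helper, not driver code): the
 * "& (TG3_TX_RING_SIZE - 1)" masking above, like NEXT_TX() in the
 * transmit path, assumes the ring sizes are powers of two so that
 * index wraparound is a cheap mask instead of a modulo:
 */
static inline u32 tg3_example_next_ring_idx(u32 idx, u32 ring_size)
{
	/* ring_size must be a power of two, e.g. TG3_TX_RING_SIZE. */
	return (idx + 1) & (ring_size - 1);
}
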
4257 /* Initialize tx/rx rings for packet processing.
4258  *
4259  * The chip has been shut down and the driver detached from
4260  * the networking stack, so no interrupts or new tx packets will
4261  * end up in the driver.  tp->{tx,}lock are held and thus
4262  * we may not sleep.
4263  */
4264 static int tg3_init_rings(struct tg3 *tp)
4265 {
4266         u32 i;
4267
4268         /* Free up all the SKBs. */
4269         tg3_free_rings(tp);
4270
4271         /* Zero out all descriptors. */
4272         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4273         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4274         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4275         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4276
4277         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4278         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4279             (tp->dev->mtu > ETH_DATA_LEN))
4280                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4281
4282         /* Initialize invariants of the rings; we only set this
4283          * stuff once.  This works because the card does not
4284          * write into the rx buffer posting rings.
4285          */
4286         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4287                 struct tg3_rx_buffer_desc *rxd;
4288
4289                 rxd = &tp->rx_std[i];
4290                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4291                         << RXD_LEN_SHIFT;
4292                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4293                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4294                                (i << RXD_OPAQUE_INDEX_SHIFT));
4295         }
4296
4297         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4298                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4299                         struct tg3_rx_buffer_desc *rxd;
4300
4301                         rxd = &tp->rx_jumbo[i];
4302                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4303                                 << RXD_LEN_SHIFT;
4304                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4305                                 RXD_FLAG_JUMBO;
4306                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4307                                (i << RXD_OPAQUE_INDEX_SHIFT));
4308                 }
4309         }
4310
4311         /* Now allocate fresh SKBs for each rx ring. */
4312         for (i = 0; i < tp->rx_pending; i++) {
4313                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4314                         printk(KERN_WARNING PFX
4315                                "%s: Using a smaller RX standard ring, "
4316                                "only %d out of %d buffers were allocated "
4317                                "successfully.\n",
4318                                tp->dev->name, i, tp->rx_pending);
4319                         if (i == 0)
4320                                 return -ENOMEM;
4321                         tp->rx_pending = i;
4322                         break;
4323                 }
4324         }
4325
4326         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4327                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4328                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4329                                              -1, i) < 0) {
4330                                 printk(KERN_WARNING PFX
4331                                        "%s: Using a smaller RX jumbo ring, "
4332                                        "only %d out of %d buffers were "
4333                                        "allocated successfully.\n",
4334                                        tp->dev->name, i, tp->rx_jumbo_pending);
4335                                 if (i == 0) {
4336                                         tg3_free_rings(tp);
4337                                         return -ENOMEM;
4338                                 }
4339                                 tp->rx_jumbo_pending = i;
4340                                 break;
4341                         }
4342                 }
4343         }
4344         return 0;
4345 }
4346
4347 /*
4348  * Must not be invoked with interrupt sources disabled and
4349  * the hardware shut down.
4350  */
4351 static void tg3_free_consistent(struct tg3 *tp)
4352 {
4353         kfree(tp->rx_std_buffers);
4354         tp->rx_std_buffers = NULL;
4355         if (tp->rx_std) {
4356                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4357                                     tp->rx_std, tp->rx_std_mapping);
4358                 tp->rx_std = NULL;
4359         }
4360         if (tp->rx_jumbo) {
4361                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4362                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4363                 tp->rx_jumbo = NULL;
4364         }
4365         if (tp->rx_rcb) {
4366                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4367                                     tp->rx_rcb, tp->rx_rcb_mapping);
4368                 tp->rx_rcb = NULL;
4369         }
4370         if (tp->tx_ring) {
4371                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4372                         tp->tx_ring, tp->tx_desc_mapping);
4373                 tp->tx_ring = NULL;
4374         }
4375         if (tp->hw_status) {
4376                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4377                                     tp->hw_status, tp->status_mapping);
4378                 tp->hw_status = NULL;
4379         }
4380         if (tp->hw_stats) {
4381                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4382                                     tp->hw_stats, tp->stats_mapping);
4383                 tp->hw_stats = NULL;
4384         }
4385 }
4386
4387 /*
4388  * Must not be invoked with interrupt sources disabled and
4389  * the hardware shut down.  Can sleep.
4390  */
4391 static int tg3_alloc_consistent(struct tg3 *tp)
4392 {
4393         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4394                                       (TG3_RX_RING_SIZE +
4395                                        TG3_RX_JUMBO_RING_SIZE)) +
4396                                      (sizeof(struct tx_ring_info) *
4397                                       TG3_TX_RING_SIZE),
4398                                      GFP_KERNEL);
4399         if (!tp->rx_std_buffers)
4400                 return -ENOMEM;
4401
4402         memset(tp->rx_std_buffers, 0,
4403                (sizeof(struct ring_info) *
4404                 (TG3_RX_RING_SIZE +
4405                  TG3_RX_JUMBO_RING_SIZE)) +
4406                (sizeof(struct tx_ring_info) *
4407                 TG3_TX_RING_SIZE));
4408
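        /* Carve the single allocation into three consecutive arrays:
         * standard RX ring_info, jumbo RX ring_info, then TX
         * tx_ring_info entries, in that order.
         */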
4409         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4410         tp->tx_buffers = (struct tx_ring_info *)
4411                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4412
4413         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4414                                           &tp->rx_std_mapping);
4415         if (!tp->rx_std)
4416                 goto err_out;
4417
4418         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4419                                             &tp->rx_jumbo_mapping);
4420
4421         if (!tp->rx_jumbo)
4422                 goto err_out;
4423
4424         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4425                                           &tp->rx_rcb_mapping);
4426         if (!tp->rx_rcb)
4427                 goto err_out;
4428
4429         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4430                                            &tp->tx_desc_mapping);
4431         if (!tp->tx_ring)
4432                 goto err_out;
4433
4434         tp->hw_status = pci_alloc_consistent(tp->pdev,
4435                                              TG3_HW_STATUS_SIZE,
4436                                              &tp->status_mapping);
4437         if (!tp->hw_status)
4438                 goto err_out;
4439
4440         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4441                                             sizeof(struct tg3_hw_stats),
4442                                             &tp->stats_mapping);
4443         if (!tp->hw_stats)
4444                 goto err_out;
4445
4446         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4447         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4448
4449         return 0;
4450
4451 err_out:
4452         tg3_free_consistent(tp);
4453         return -ENOMEM;
4454 }
4455
4456 #define MAX_WAIT_CNT 1000
4457
4458 /* To stop a block, clear the enable bit and poll till it
4459  * clears.  tp->lock is held.
4460  */
4461 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4462 {
4463         unsigned int i;
4464         u32 val;
4465
4466         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4467                 switch (ofs) {
4468                 case RCVLSC_MODE:
4469                 case DMAC_MODE:
4470                 case MBFREE_MODE:
4471                 case BUFMGR_MODE:
4472                 case MEMARB_MODE:
4473                         /* We can't enable/disable these bits of the
4474                          * 5705/5750, just say success.
4475                          */
4476                         return 0;
4477
4478                 default:
4479                         break;
4480                 }
4481         }
4482
4483         val = tr32(ofs);
4484         val &= ~enable_bit;
4485         tw32_f(ofs, val);
4486
4487         for (i = 0; i < MAX_WAIT_CNT; i++) {
4488                 udelay(100);
4489                 val = tr32(ofs);
4490                 if ((val & enable_bit) == 0)
4491                         break;
4492         }
4493
4494         if (i == MAX_WAIT_CNT && !silent) {
4495                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4496                        "ofs=%lx enable_bit=%x\n",
4497                        ofs, enable_bit);
4498                 return -ENODEV;
4499         }
4500
4501         return 0;
4502 }
4503
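/* Editorial note: tg3_abort_hw() below quiesces the chip in dependency
 * order: the receive MAC and RCV* blocks first (so no new inbound DMA),
 * then the SND* blocks and read DMA, then the transmit MAC, then the
 * HOSTCC/WDMAC/MBFREE blocks, and finally the buffer manager and the
 * memory arbiter.
 */
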
4504 /* tp->lock is held. */
4505 static int tg3_abort_hw(struct tg3 *tp, int silent)
4506 {
4507         int i, err;
4508
4509         tg3_disable_ints(tp);
4510
4511         tp->rx_mode &= ~RX_MODE_ENABLE;
4512         tw32_f(MAC_RX_MODE, tp->rx_mode);
4513         udelay(10);
4514
4515         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4516         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4517         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4518         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4519         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4520         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4521
4522         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4523         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4524         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4525         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4526         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4527         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4528         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4529
4530         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4531         tw32_f(MAC_MODE, tp->mac_mode);
4532         udelay(40);
4533
4534         tp->tx_mode &= ~TX_MODE_ENABLE;
4535         tw32_f(MAC_TX_MODE, tp->tx_mode);
4536
4537         for (i = 0; i < MAX_WAIT_CNT; i++) {
4538                 udelay(100);
4539                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4540                         break;
4541         }
4542         if (i >= MAX_WAIT_CNT) {
4543                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4544                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4545                        tp->dev->name, tr32(MAC_TX_MODE));
4546                 err |= -ENODEV;
4547         }
4548
4549         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4550         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4551         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4552
4553         tw32(FTQ_RESET, 0xffffffff);
4554         tw32(FTQ_RESET, 0x00000000);
4555
4556         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4557         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4558
4559         if (tp->hw_status)
4560                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4561         if (tp->hw_stats)
4562                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4563
4564         return err;
4565 }
4566
4567 /* tp->lock is held. */
4568 static int tg3_nvram_lock(struct tg3 *tp)
4569 {
4570         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4571                 int i;
4572
4573                 if (tp->nvram_lock_cnt == 0) {
4574                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4575                         for (i = 0; i < 8000; i++) {
4576                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4577                                         break;
4578                                 udelay(20);
4579                         }
4580                         if (i == 8000) {
4581                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4582                                 return -ENODEV;
4583                         }
4584                 }
4585                 tp->nvram_lock_cnt++;
4586         }
4587         return 0;
4588 }
4589
4590 /* tp->lock is held. */
4591 static void tg3_nvram_unlock(struct tg3 *tp)
4592 {
4593         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4594                 if (tp->nvram_lock_cnt > 0)
4595                         tp->nvram_lock_cnt--;
4596                 if (tp->nvram_lock_cnt == 0)
4597                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4598         }
4599 }
4600
4601 /* tp->lock is held. */
4602 static void tg3_enable_nvram_access(struct tg3 *tp)
4603 {
4604         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4605             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4606                 u32 nvaccess = tr32(NVRAM_ACCESS);
4607
4608                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4609         }
4610 }
4611
4612 /* tp->lock is held. */
4613 static void tg3_disable_nvram_access(struct tg3 *tp)
4614 {
4615         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4616             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4617                 u32 nvaccess = tr32(NVRAM_ACCESS);
4618
4619                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4620         }
4621 }
4622
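/* Editorial usage sketch (hypothetical, not driver code): NVRAM access
 * paths elsewhere in this file are expected to bracket the actual
 * reads/writes with the arbitration and access-enable helpers above,
 * roughly:
 *
 *	if (tg3_nvram_lock(tp) == 0) {
 *		tg3_enable_nvram_access(tp);
 *		... read or write NVRAM ...
 *		tg3_disable_nvram_access(tp);
 *		tg3_nvram_unlock(tp);
 *	}
 *
 * tg3_nvram_lock() is refcounted through tp->nvram_lock_cnt, so nested
 * lock/unlock pairs only touch NVRAM_SWARB at the outermost level.
 */
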
4623 /* tp->lock is held. */
4624 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4625 {
4626         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4627                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4628
4629         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4630                 switch (kind) {
4631                 case RESET_KIND_INIT:
4632                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4633                                       DRV_STATE_START);
4634                         break;
4635
4636                 case RESET_KIND_SHUTDOWN:
4637                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4638                                       DRV_STATE_UNLOAD);
4639                         break;
4640
4641                 case RESET_KIND_SUSPEND:
4642                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4643                                       DRV_STATE_SUSPEND);
4644                         break;
4645
4646                 default:
4647                         break;
4648                 }
4649         }
4650 }
4651
4652 /* tp->lock is held. */
4653 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4654 {
4655         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4656                 switch (kind) {
4657                 case RESET_KIND_INIT:
4658                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4659                                       DRV_STATE_START_DONE);
4660                         break;
4661
4662                 case RESET_KIND_SHUTDOWN:
4663                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4664                                       DRV_STATE_UNLOAD_DONE);
4665                         break;
4666
4667                 default:
4668                         break;
4669                 }
4670         }
4671 }
4672
4673 /* tp->lock is held. */
4674 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4675 {
4676         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4677                 switch (kind) {
4678                 case RESET_KIND_INIT:
4679                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4680                                       DRV_STATE_START);
4681                         break;
4682
4683                 case RESET_KIND_SHUTDOWN:
4684                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4685                                       DRV_STATE_UNLOAD);
4686                         break;
4687
4688                 case RESET_KIND_SUSPEND:
4689                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4690                                       DRV_STATE_SUSPEND);
4691                         break;
4692
4693                 default:
4694                         break;
4695                 }
4696         }
4697 }
4698
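/* Wait for the bootcode to finish after a reset.  tg3_write_sig_pre_reset()
 * above stores NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in the firmware mailbox; the
 * loop below waits for the mailbox to read back as the one's complement of
 * that magic, which the bootcode writes once its initialization completes.
 * 5906 parts are handled separately via the VCPU_STATUS_INIT_DONE bit.
 */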
4699 static int tg3_poll_fw(struct tg3 *tp)
4700 {
4701         int i;
4702         u32 val;
4703
4704         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4705                 for (i = 0; i < 400; i++) {
4706                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4707                                 return 0;
4708                         udelay(10);
4709                 }
4710                 return -ENODEV;
4711         }
4712
4713         /* Wait for firmware initialization to complete. */
4714         for (i = 0; i < 100000; i++) {
4715                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4716                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4717                         break;
4718                 udelay(10);
4719         }
4720
4721         /* Chip might not be fitted with firmware.  Some Sun onboard
4722          * parts are configured like that.  So don't signal the timeout
4723          * of the above loop as an error, but do report the lack of
4724          * running firmware once.
4725          */
4726         if (i >= 100000 &&
4727             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4728                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4729
4730                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4731                        tp->dev->name);
4732         }
4733
4734         return 0;
4735 }
4736
4737 static void tg3_stop_fw(struct tg3 *);
4738
4739 /* tp->lock is held. */
4740 static int tg3_chip_reset(struct tg3 *tp)
4741 {
4742         u32 val;
4743         void (*write_op)(struct tg3 *, u32, u32);
4744         int err;
4745
4746         tg3_nvram_lock(tp);
4747
4748         /* No matching tg3_nvram_unlock() after this because
4749          * chip reset below will undo the nvram lock.
4750          */
4751         tp->nvram_lock_cnt = 0;
4752
4753         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4754             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4755             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4756                 tw32(GRC_FASTBOOT_PC, 0);
4757
4758         /*
4759          * We must avoid the readl() that normally takes place.
4760          * It locks machines, causes machine checks, and other
4761          * fun things.  So, temporarily disable the 5701
4762          * hardware workaround, while we do the reset.
4763          */
4764         write_op = tp->write32;
4765         if (write_op == tg3_write_flush_reg32)
4766                 tp->write32 = tg3_write32;
4767
4768         /* do the reset */
4769         val = GRC_MISC_CFG_CORECLK_RESET;
4770
4771         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4772                 if (tr32(0x7e2c) == 0x60) {
4773                         tw32(0x7e2c, 0x20);
4774                 }
4775                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4776                         tw32(GRC_MISC_CFG, (1 << 29));
4777                         val |= (1 << 29);
4778                 }
4779         }
4780
4781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4782                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
4783                 tw32(GRC_VCPU_EXT_CTRL,
4784                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
4785         }
4786
4787         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4788                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4789         tw32(GRC_MISC_CFG, val);
4790
4791         /* restore 5701 hardware bug workaround write method */
4792         tp->write32 = write_op;
4793
4794         /* Unfortunately, we have to delay before the PCI read back.
4795          * Some 575X chips will not even respond to a PCI cfg access
4796          * when the reset command is given to the chip.
4797          *
4798          * How do these hardware designers expect things to work
4799          * properly if the PCI write is posted for a long period
4800          * of time?  It is always necessary to have some method by
4801          * which a register read back can occur to push out the
4802          * write that does the reset.
4803          *
4804          * For most tg3 variants the trick below was working.
4805          * Ho hum...
4806          */
4807         udelay(120);
4808
4809         /* Flush PCI posted writes.  The normal MMIO registers
4810          * are inaccessible at this time so this is the only
4811          * way to do this reliably (actually, this is no longer
4812          * the case, see above).  I tried to use indirect
4813          * register read/write but this upset some 5701 variants.
4814          */
4815         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4816
4817         udelay(120);
4818
4819         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4820                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4821                         int i;
4822                         u32 cfg_val;
4823
4824                         /* Wait for link training to complete.  */
4825                         for (i = 0; i < 5000; i++)
4826                                 udelay(100);
4827
4828                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4829                         pci_write_config_dword(tp->pdev, 0xc4,
4830                                                cfg_val | (1 << 15));
4831                 }
4832                 /* Set PCIE max payload size and clear error status.  */
4833                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4834         }
4835
4836         /* Re-enable indirect register accesses. */
4837         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4838                                tp->misc_host_ctrl);
4839
4840         /* Set MAX PCI retry to zero. */
4841         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4842         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4843             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4844                 val |= PCISTATE_RETRY_SAME_DMA;
4845         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4846
4847         pci_restore_state(tp->pdev);
4848
4849         /* Make sure PCI-X relaxed ordering bit is clear. */
4850         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4851         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4852         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4853
4854         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4855                 u32 val;
4856
4857                 /* Chip reset on 5780 will reset MSI enable bit,
4858                  * so we need to restore it.
4859                  */
4860                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4861                         u16 ctrl;
4862
4863                         pci_read_config_word(tp->pdev,
4864                                              tp->msi_cap + PCI_MSI_FLAGS,
4865                                              &ctrl);
4866                         pci_write_config_word(tp->pdev,
4867                                               tp->msi_cap + PCI_MSI_FLAGS,
4868                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4869                         val = tr32(MSGINT_MODE);
4870                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4871                 }
4872
4873                 val = tr32(MEMARB_MODE);
4874                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4875
4876         } else
4877                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4878
4879         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4880                 tg3_stop_fw(tp);
4881                 tw32(0x5000, 0x400);
4882         }
4883
4884         tw32(GRC_MODE, tp->grc_mode);
4885
4886         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4887                 u32 val = tr32(0xc4);
4888
4889                 tw32(0xc4, val | (1 << 15));
4890         }
4891
4892         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4893             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4894                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4895                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4896                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4897                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4898         }
4899
4900         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4901                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4902                 tw32_f(MAC_MODE, tp->mac_mode);
4903         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4904                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4905                 tw32_f(MAC_MODE, tp->mac_mode);
4906         } else
4907                 tw32_f(MAC_MODE, 0);
4908         udelay(40);
4909
4910         err = tg3_poll_fw(tp);
4911         if (err)
4912                 return err;
4913
4914         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4915             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4916                 u32 val = tr32(0x7c00);
4917
4918                 tw32(0x7c00, val | (1 << 25));
4919         }
4920
4921         /* Reprobe ASF enable state.  */
4922         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4923         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4924         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4925         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4926                 u32 nic_cfg;
4927
4928                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4929                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4930                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4931                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4932                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4933                 }
4934         }
4935
4936         return 0;
4937 }
4938
4939 /* tp->lock is held. */
4940 static void tg3_stop_fw(struct tg3 *tp)
4941 {
4942         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4943                 u32 val;
4944                 int i;
4945
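		/* Post the PAUSE command in the firmware command mailbox,
		 * then set the RX CPU event bit (bit 14) so the firmware
		 * notices it; the wait below watches for the firmware to
		 * clear that bit as its acknowledgement.
		 */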
4946                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4947                 val = tr32(GRC_RX_CPU_EVENT);
4948                 val |= (1 << 14);
4949                 tw32(GRC_RX_CPU_EVENT, val);
4950
4951                 /* Wait for RX cpu to ACK the event.  */
4952                 for (i = 0; i < 100; i++) {
4953                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4954                                 break;
4955                         udelay(1);
4956                 }
4957         }
4958 }
4959
4960 /* tp->lock is held. */
4961 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4962 {
4963         int err;
4964
4965         tg3_stop_fw(tp);
4966
4967         tg3_write_sig_pre_reset(tp, kind);
4968
4969         tg3_abort_hw(tp, silent);
4970         err = tg3_chip_reset(tp);
4971
4972         tg3_write_sig_legacy(tp, kind);
4973         tg3_write_sig_post_reset(tp, kind);
4974
4975         if (err)
4976                 return err;
4977
4978         return 0;
4979 }
4980
4981 #define TG3_FW_RELEASE_MAJOR    0x0
4982 #define TG3_FW_RELEASE_MINOR    0x0
4983 #define TG3_FW_RELEASE_FIX      0x0
4984 #define TG3_FW_START_ADDR       0x08000000
4985 #define TG3_FW_TEXT_ADDR        0x08000000
4986 #define TG3_FW_TEXT_LEN         0x9c0
4987 #define TG3_FW_RODATA_ADDR      0x080009c0
4988 #define TG3_FW_RODATA_LEN       0x60
4989 #define TG3_FW_DATA_ADDR        0x08000a40
4990 #define TG3_FW_DATA_LEN         0x20
4991 #define TG3_FW_SBSS_ADDR        0x08000a60
4992 #define TG3_FW_SBSS_LEN         0xc
4993 #define TG3_FW_BSS_ADDR         0x08000a70
4994 #define TG3_FW_BSS_LEN          0x10
4995
4996 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4997         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4998         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4999         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5000         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5001         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5002         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5003         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5004         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5005         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5006         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5007         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5008         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5009         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5010         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5011         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5012         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5013         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5014         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5015         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5016         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5017         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5018         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5019         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5020         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5021         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5022         0, 0, 0, 0, 0, 0,
5023         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5024         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5025         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5026         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5027         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5028         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5029         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5030         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5031         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5032         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5033         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5034         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5035         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5036         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5037         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5038         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5039         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5040         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5041         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5042         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5043         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5044         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5045         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5046         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5047         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5048         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5049         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5050         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5051         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5052         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5053         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5054         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5055         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5056         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5057         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5058         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5059         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5060         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5061         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5062         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5063         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5064         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5065         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5066         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5067         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5068         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5069         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5070         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5071         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5072         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5073         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5074         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5075         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5076         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5077         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5078         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5079         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5080         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5081         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5082         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5083         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5084         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5085         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5086         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5087         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5088 };
5089
5090 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5091         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5092         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5093         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5094         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5095         0x00000000
5096 };
5097
5098 #if 0 /* All zeros, don't eat up space with it. */
5099 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5100         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5101         0x00000000, 0x00000000, 0x00000000, 0x00000000
5102 };
5103 #endif
5104
5105 #define RX_CPU_SCRATCH_BASE     0x30000
5106 #define RX_CPU_SCRATCH_SIZE     0x04000
5107 #define TX_CPU_SCRATCH_BASE     0x34000
5108 #define TX_CPU_SCRATCH_SIZE     0x04000
5109
5110 /* tp->lock is held. */
5111 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5112 {
5113         int i;
5114
5115         BUG_ON(offset == TX_CPU_BASE &&
5116             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5117
5118         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5119                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5120
5121                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5122                 return 0;
5123         }
5124         if (offset == RX_CPU_BASE) {
5125                 for (i = 0; i < 10000; i++) {
5126                         tw32(offset + CPU_STATE, 0xffffffff);
5127                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5128                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5129                                 break;
5130                 }
5131
5132                 tw32(offset + CPU_STATE, 0xffffffff);
5133                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5134                 udelay(10);
5135         } else {
5136                 for (i = 0; i < 10000; i++) {
5137                         tw32(offset + CPU_STATE, 0xffffffff);
5138                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5139                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5140                                 break;
5141                 }
5142         }
5143
5144         if (i >= 10000) {
5145                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5146                        "and %s CPU\n",
5147                        tp->dev->name,
5148                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5149                 return -ENODEV;
5150         }
5151
5152         /* Clear firmware's nvram arbitration. */
5153         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5154                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5155         return 0;
5156 }
5157
5158 struct fw_info {
5159         unsigned int text_base;
5160         unsigned int text_len;
5161         const u32 *text_data;
5162         unsigned int rodata_base;
5163         unsigned int rodata_len;
5164         const u32 *rodata_data;
5165         unsigned int data_base;
5166         unsigned int data_len;
5167         const u32 *data_data;
5168 };
5169
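/* Editorial note: tg3_load_firmware_cpu() below copies each section to
 * cpu_scratch_base + (section_base & 0xffff).  For the 5701 A0 fix-up
 * firmware above, TG3_FW_TEXT_ADDR is 0x08000000, so the text section
 * lands at offset 0x0000 of RX_CPU_SCRATCH_BASE (0x30000), while
 * TG3_FW_RODATA_ADDR (0x080009c0) lands at offset 0x09c0.
 */
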
5170 /* tp->lock is held. */
5171 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5172                                  int cpu_scratch_size, struct fw_info *info)
5173 {
5174         int err, lock_err, i;
5175         void (*write_op)(struct tg3 *, u32, u32);
5176
5177         if (cpu_base == TX_CPU_BASE &&
5178             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5179                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5180                        "TX cpu firmware on %s which is 5705.\n",
5181                        tp->dev->name);
5182                 return -EINVAL;
5183         }
5184
5185         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5186                 write_op = tg3_write_mem;
5187         else
5188                 write_op = tg3_write_indirect_reg32;
5189
5190         /* It is possible that bootcode is still loading at this point.
5191          * Get the nvram lock first before halting the cpu.
5192          */
5193         lock_err = tg3_nvram_lock(tp);
5194         err = tg3_halt_cpu(tp, cpu_base);
5195         if (!lock_err)
5196                 tg3_nvram_unlock(tp);
5197         if (err)
5198                 goto out;
5199
5200         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5201                 write_op(tp, cpu_scratch_base + i, 0);
5202         tw32(cpu_base + CPU_STATE, 0xffffffff);
5203         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5204         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5205                 write_op(tp, (cpu_scratch_base +
5206                               (info->text_base & 0xffff) +
5207                               (i * sizeof(u32))),
5208                          (info->text_data ?
5209                           info->text_data[i] : 0));
5210         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5211                 write_op(tp, (cpu_scratch_base +
5212                               (info->rodata_base & 0xffff) +
5213                               (i * sizeof(u32))),
5214                          (info->rodata_data ?
5215                           info->rodata_data[i] : 0));
5216         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5217                 write_op(tp, (cpu_scratch_base +
5218                               (info->data_base & 0xffff) +
5219                               (i * sizeof(u32))),
5220                          (info->data_data ?
5221                           info->data_data[i] : 0));
5222
5223         err = 0;
5224
5225 out:
5226         return err;
5227 }
5228
5229 /* tp->lock is held. */
5230 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5231 {
5232         struct fw_info info;
5233         int err, i;
5234
5235         info.text_base = TG3_FW_TEXT_ADDR;
5236         info.text_len = TG3_FW_TEXT_LEN;
5237         info.text_data = &tg3FwText[0];
5238         info.rodata_base = TG3_FW_RODATA_ADDR;
5239         info.rodata_len = TG3_FW_RODATA_LEN;
5240         info.rodata_data = &tg3FwRodata[0];
5241         info.data_base = TG3_FW_DATA_ADDR;
5242         info.data_len = TG3_FW_DATA_LEN;
5243         info.data_data = NULL;
5244
5245         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5246                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5247                                     &info);
5248         if (err)
5249                 return err;
5250
5251         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5252                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5253                                     &info);
5254         if (err)
5255                 return err;
5256
5257         /* Now start up only the RX cpu. */
5258         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5259         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5260
5261         for (i = 0; i < 5; i++) {
5262                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5263                         break;
5264                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5265                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5266                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5267                 udelay(1000);
5268         }
5269         if (i >= 5) {
5270                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5271                        "to set RX CPU PC, is %08x should be %08x\n",
5272                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5273                        TG3_FW_TEXT_ADDR);
5274                 return -ENODEV;
5275         }
5276         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5277         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5278
5279         return 0;
5280 }
5281
5282 #if TG3_TSO_SUPPORT != 0
5283
5284 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5285 #define TG3_TSO_FW_RELEASE_MINOR        0x6
5286 #define TG3_TSO_FW_RELEASE_FIX          0x0
5287 #define TG3_TSO_FW_START_ADDR           0x08000000
5288 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5289 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5290 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5291 #define TG3_TSO_FW_RODATA_LEN           0x60
5292 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5293 #define TG3_TSO_FW_DATA_LEN             0x30
5294 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5295 #define TG3_TSO_FW_SBSS_LEN             0x2c
5296 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5297 #define TG3_TSO_FW_BSS_LEN              0x894
5298
5299 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5300         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5301         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5302         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5303         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5304         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5305         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5306         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5307         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5308         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5309         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5310         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5311         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5312         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5313         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5314         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5315         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5316         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5317         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5318         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5319         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5320         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5321         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5322         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5323         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5324         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5325         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5326         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5327         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5328         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5329         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5330         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5331         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5332         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5333         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5334         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5335         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5336         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5337         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5338         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5339         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5340         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5341         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5342         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5343         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5344         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5345         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5346         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5347         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5348         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5349         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5350         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5351         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5352         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5353         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5354         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5355         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5356         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5357         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5358         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5359         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5360         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5361         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5362         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5363         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5364         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5365         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5366         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5367         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5368         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5369         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5370         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5371         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5372         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5373         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5374         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5375         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5376         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5377         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5378         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5379         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5380         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5381         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5382         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5383         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5384         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5385         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5386         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5387         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5388         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5389         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5390         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5391         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5392         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5393         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5394         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5395         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5396         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5397         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5398         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5399         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5400         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5401         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5402         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5403         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5404         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5405         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5406         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5407         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5408         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5409         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5410         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5411         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5412         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5413         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5414         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5415         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5416         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5417         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5418         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5419         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5420         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5421         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5422         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5423         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5424         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5425         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5426         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5427         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5428         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5429         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5430         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5431         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5432         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5433         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5434         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5435         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5436         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5437         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5438         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5439         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5440         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5441         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5442         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5443         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5444         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5445         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5446         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5447         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5448         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5449         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5450         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5451         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5452         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5453         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5454         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5455         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5456         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5457         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5458         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5459         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5460         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5461         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5462         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5463         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5464         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5465         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5466         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5467         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5468         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5469         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5470         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5471         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5472         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5473         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5474         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5475         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5476         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5477         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5478         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5479         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5480         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5481         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5482         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5483         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5484         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5485         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5486         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5487         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5488         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5489         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5490         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5491         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5492         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5493         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5494         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5495         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5496         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5497         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5498         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5499         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5500         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5501         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5502         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5503         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5504         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5505         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5506         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5507         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5508         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5509         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5510         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5511         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5512         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5513         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5514         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5515         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5516         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5517         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5518         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5519         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5520         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5521         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5522         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5523         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5524         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5525         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5526         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5527         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5528         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5529         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5530         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5531         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5532         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5533         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5534         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5535         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5536         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5537         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5538         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5539         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5540         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5541         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5542         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5543         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5544         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5545         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5546         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5547         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5548         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5549         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5550         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5551         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5552         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5553         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5554         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5555         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5556         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5557         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5558         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5559         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5560         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5561         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5562         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5563         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5564         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5565         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5566         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5567         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5568         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5569         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5570         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5571         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5572         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5573         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5574         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5575         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5576         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5577         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5578         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5579         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5580         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5581         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5582         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5583         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5584 };
5585
5586 static const u32 tg3TsoFwRodata[] = {
5587         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5588         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5589         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5590         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5591         0x00000000,
5592 };
5593
5594 static const u32 tg3TsoFwData[] = {
5595         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5596         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5597         0x00000000,
5598 };
5599
5600 /* 5705 needs a special version of the TSO firmware.  */
5601 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5602 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5603 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5604 #define TG3_TSO5_FW_START_ADDR          0x00010000
5605 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5606 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5607 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5608 #define TG3_TSO5_FW_RODATA_LEN          0x50
5609 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5610 #define TG3_TSO5_FW_DATA_LEN            0x20
5611 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5612 #define TG3_TSO5_FW_SBSS_LEN            0x28
5613 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5614 #define TG3_TSO5_FW_BSS_LEN             0x88
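/* For reference, the ADDR/LEN pairs above describe one contiguous image in
 * NIC memory: text at 0x00010000 runs for 0xe90 bytes up to 0x00010e90 where
 * rodata begins, rodata's 0x50 bytes end at 0x00010ee0 with data starting at
 * 0x00010f00 after a small alignment gap, data's 0x20 bytes end exactly at
 * the sbss base 0x00010f20, and sbss plus bss carry the image from there out
 * to 0x00010f50 + 0x88.  Only the ADDR/LEN values are consumed by the
 * firmware-loading code below.
 */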
5615
5616 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5617         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5618         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5619         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5620         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5621         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5622         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5623         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5624         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5625         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5626         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5627         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5628         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5629         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5630         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5631         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5632         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5633         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5634         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5635         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5636         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5637         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5638         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5639         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5640         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5641         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5642         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5643         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5644         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5645         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5646         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5647         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5648         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5649         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5650         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5651         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5652         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5653         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5654         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5655         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5656         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5657         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5658         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5659         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5660         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5661         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5662         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5663         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5664         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5665         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5666         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5667         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5668         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5669         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5670         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5671         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5672         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5673         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5674         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5675         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5676         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5677         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5678         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5679         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5680         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5681         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5682         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5683         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5684         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5685         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5686         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5687         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5688         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5689         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5690         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5691         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5692         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5693         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5694         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5695         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5696         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5697         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5698         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5699         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5700         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5701         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5702         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5703         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5704         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5705         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5706         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5707         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5708         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5709         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5710         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5711         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5712         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5713         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5714         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5715         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5716         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5717         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5718         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5719         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5720         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5721         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5722         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5723         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5724         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5725         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5726         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5727         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5728         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5729         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5730         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5731         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5732         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5733         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5734         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5735         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5736         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5737         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5738         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5739         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5740         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5741         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5742         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5743         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5744         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5745         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5746         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5747         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5748         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5749         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5750         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5751         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5752         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5753         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5754         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5755         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5756         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5757         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5758         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5759         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5760         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5761         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5762         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5763         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5764         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5765         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5766         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5767         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5768         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5769         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5770         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5771         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5772         0x00000000, 0x00000000, 0x00000000,
5773 };
5774
5775 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5776         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5777         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5778         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5779         0x00000000, 0x00000000, 0x00000000,
5780 };
5781
5782 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5783         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5784         0x00000000, 0x00000000, 0x00000000,
5785 };
5786
5787 /* tp->lock is held. */
5788 static int tg3_load_tso_firmware(struct tg3 *tp)
5789 {
5790         struct fw_info info;
5791         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5792         int err, i;
5793
5794         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5795                 return 0;
5796
5797         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5798                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5799                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5800                 info.text_data = &tg3Tso5FwText[0];
5801                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5802                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5803                 info.rodata_data = &tg3Tso5FwRodata[0];
5804                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5805                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5806                 info.data_data = &tg3Tso5FwData[0];
5807                 cpu_base = RX_CPU_BASE;
5808                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5809                 cpu_scratch_size = (info.text_len +
5810                                     info.rodata_len +
5811                                     info.data_len +
5812                                     TG3_TSO5_FW_SBSS_LEN +
5813                                     TG3_TSO5_FW_BSS_LEN);
5814         } else {
5815                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5816                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5817                 info.text_data = &tg3TsoFwText[0];
5818                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5819                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5820                 info.rodata_data = &tg3TsoFwRodata[0];
5821                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5822                 info.data_len = TG3_TSO_FW_DATA_LEN;
5823                 info.data_data = &tg3TsoFwData[0];
5824                 cpu_base = TX_CPU_BASE;
5825                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5826                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5827         }
5828
5829         err = tg3_load_firmware_cpu(tp, cpu_base,
5830                                     cpu_scratch_base, cpu_scratch_size,
5831                                     &info);
5832         if (err)
5833                 return err;
5834
5835         /* Now start up the CPU. */
5836         tw32(cpu_base + CPU_STATE, 0xffffffff);
5837         tw32_f(cpu_base + CPU_PC,    info.text_base);
5838
5839         for (i = 0; i < 5; i++) {
5840                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5841                         break;
5842                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5843                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5844                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5845                 udelay(1000);
5846         }
5847         if (i >= 5) {
5848                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5849                        "CPU PC for %s: is %08x, should be %08x\n",
5850                        tp->dev->name, tr32(cpu_base + CPU_PC),
5851                        info.text_base);
5852                 return -ENODEV;
5853         }
5854         tw32(cpu_base + CPU_STATE, 0xffffffff);
5855         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5856         return 0;
5857 }
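/* Worked example for the 5705 branch above: the scratch footprint is
 * text + rodata + data + sbss + bss = 0xe90 + 0x50 + 0x20 + 0x28 + 0x88
 * = 0xfb0 bytes carved out of mbuf pool SRAM.  The start-up loop gives the
 * CPU five attempts (roughly 5 ms total with udelay(1000) between retries)
 * to latch the new program counter before the function gives up with
 * -ENODEV.
 */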
5858
5859 #endif /* TG3_TSO_SUPPORT != 0 */
5860
5861 /* tp->lock is held. */
5862 static void __tg3_set_mac_addr(struct tg3 *tp)
5863 {
5864         u32 addr_high, addr_low;
5865         int i;
5866
5867         addr_high = ((tp->dev->dev_addr[0] << 8) |
5868                      tp->dev->dev_addr[1]);
5869         addr_low = ((tp->dev->dev_addr[2] << 24) |
5870                     (tp->dev->dev_addr[3] << 16) |
5871                     (tp->dev->dev_addr[4] <<  8) |
5872                     (tp->dev->dev_addr[5] <<  0));
5873         for (i = 0; i < 4; i++) {
5874                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5875                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5876         }
5877
5878         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5879             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5880                 for (i = 0; i < 12; i++) {
5881                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5882                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5883                 }
5884         }
5885
5886         addr_high = (tp->dev->dev_addr[0] +
5887                      tp->dev->dev_addr[1] +
5888                      tp->dev->dev_addr[2] +
5889                      tp->dev->dev_addr[3] +
5890                      tp->dev->dev_addr[4] +
5891                      tp->dev->dev_addr[5]) &
5892                 TX_BACKOFF_SEED_MASK;
5893         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5894 }
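/* Packing example, assuming a hypothetical MAC of 00:10:18:a1:b2:c3:
 * addr_high holds the first two octets, 0x00000010, and addr_low the
 * remaining four, 0x18a1b2c3; the backoff seed is simply the byte sum
 * (0x00 + 0x10 + 0x18 + 0xa1 + 0xb2 + 0xc3 = 0x23e) masked with
 * TX_BACKOFF_SEED_MASK.
 */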
5895
5896 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5897 {
5898         struct tg3 *tp = netdev_priv(dev);
5899         struct sockaddr *addr = p;
5900         int err = 0;
5901
5902         if (!is_valid_ether_addr(addr->sa_data))
5903                 return -EINVAL;
5904
5905         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5906
5907         if (!netif_running(dev))
5908                 return 0;
5909
5910         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5911                 /* Reset chip so that ASF can re-init any MAC addresses it
5912                  * needs.
5913                  */
5914                 tg3_netif_stop(tp);
5915                 tg3_full_lock(tp, 1);
5916
5917                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5918                 err = tg3_restart_hw(tp, 0);
5919                 if (!err)
5920                         tg3_netif_start(tp);
5921                 tg3_full_unlock(tp);
5922         } else {
5923                 spin_lock_bh(&tp->lock);
5924                 __tg3_set_mac_addr(tp);
5925                 spin_unlock_bh(&tp->lock);
5926         }
5927
5928         return err;
5929 }
5930
5931 /* tp->lock is held. */
5932 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5933                            dma_addr_t mapping, u32 maxlen_flags,
5934                            u32 nic_addr)
5935 {
5936         tg3_write_mem(tp,
5937                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5938                       ((u64) mapping >> 32));
5939         tg3_write_mem(tp,
5940                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5941                       ((u64) mapping & 0xffffffff));
5942         tg3_write_mem(tp,
5943                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5944                        maxlen_flags);
5945
5946         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5947                 tg3_write_mem(tp,
5948                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5949                               nic_addr);
5950 }
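/* Each BDINFO block in NIC SRAM is written here as four 32-bit words: the
 * high and low halves of the host DMA address, the maxlen/flags word, and
 * the NIC-local descriptor address, the last of which is skipped on
 * 5705-and-later parts.  See the send-ring and receive-return-ring setup
 * in tg3_reset_hw() below for typical callers.
 */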
5951
5952 static void __tg3_set_rx_mode(struct net_device *);
5953 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5954 {
5955         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5956         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5957         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5958         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5959         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5960                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5961                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5962         }
5963         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5964         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5965         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5966                 u32 val = ec->stats_block_coalesce_usecs;
5967
5968                 if (!netif_carrier_ok(tp->dev))
5969                         val = 0;
5970
5971                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5972         }
5973 }
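/* Example mapping with hypothetical ethtool settings: rx_coalesce_usecs=20
 * and rx_max_coalesced_frames=5 land directly in HOSTCC_RXCOL_TICKS and
 * HOSTCC_RXMAX_FRAMES, i.e. the engine signals an interrupt once roughly
 * 20 us have elapsed or 5 frames have arrived, whichever comes first.  The
 * statistics tick is forced to zero while the carrier is down, presumably
 * so the stats block is not refreshed on a dead link.
 */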
5974
5975 /* tp->lock is held. */
5976 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5977 {
5978         u32 val, rdmac_mode;
5979         int i, err, limit;
5980
5981         tg3_disable_ints(tp);
5982
5983         tg3_stop_fw(tp);
5984
5985         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5986
5987         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5988                 tg3_abort_hw(tp, 1);
5989         }
5990
5991         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5992                 tg3_phy_reset(tp);
5993
5994         err = tg3_chip_reset(tp);
5995         if (err)
5996                 return err;
5997
5998         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5999
6000         /* This works around an issue with Athlon chipsets on
6001          * B3 tigon3 silicon.  This bit has no effect on any
6002          * other revision.  But do not set this on PCI Express
6003          * chips.
6004          */
6005         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6006                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6007         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6008
6009         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6010             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6011                 val = tr32(TG3PCI_PCISTATE);
6012                 val |= PCISTATE_RETRY_SAME_DMA;
6013                 tw32(TG3PCI_PCISTATE, val);
6014         }
6015
6016         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6017                 /* Enable some hw fixes.  */
6018                 val = tr32(TG3PCI_MSI_DATA);
6019                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6020                 tw32(TG3PCI_MSI_DATA, val);
6021         }
6022
6023         /* Descriptor ring init may make accesses to the
6024          * NIC SRAM area to setup the TX descriptors, so we
6025          * can only do this after the hardware has been
6026          * successfully reset.
6027          */
6028         err = tg3_init_rings(tp);
6029         if (err)
6030                 return err;
6031
6032         /* This value is determined during the probe-time DMA
6033          * engine test, tg3_test_dma.
6034          */
6035         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6036
6037         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6038                           GRC_MODE_4X_NIC_SEND_RINGS |
6039                           GRC_MODE_NO_TX_PHDR_CSUM |
6040                           GRC_MODE_NO_RX_PHDR_CSUM);
6041         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6042
6043         /* Pseudo-header checksum is done by hardware logic and not
6044          * the offload processors, so make the chip do the pseudo-
6045          * header checksums on receive.  For transmit it is more
6046          * convenient to do the pseudo-header checksum in software
6047          * as Linux does that on transmit for us in all cases.
6048          */
6049         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6050
6051         tw32(GRC_MODE,
6052              tp->grc_mode |
6053              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6054
6055         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
6056         val = tr32(GRC_MISC_CFG);
6057         val &= ~0xff;
6058         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6059         tw32(GRC_MISC_CFG, val);
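        /* Programming 65 here presumably yields 66 MHz / (65 + 1) = 1 MHz,
         * i.e. a 1 us tick for the timers driven off this prescaler.
         */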
6060
6061         /* Initialize MBUF/DESC pool. */
6062         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6063                 /* Do nothing.  */
6064         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6065                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6066                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6067                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6068                 else
6069                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6070                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6071                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6072         }
6073 #if TG3_TSO_SUPPORT != 0
6074         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6075                 int fw_len;
6076
6077                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6078                           TG3_TSO5_FW_RODATA_LEN +
6079                           TG3_TSO5_FW_DATA_LEN +
6080                           TG3_TSO5_FW_SBSS_LEN +
6081                           TG3_TSO5_FW_BSS_LEN);
6082                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6083                 tw32(BUFMGR_MB_POOL_ADDR,
6084                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6085                 tw32(BUFMGR_MB_POOL_SIZE,
6086                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6087         }
6088 #endif
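        /* Rough numbers for the 5705 TSO carve-out above: the raw image is
         * 0xe90 + 0x50 + 0x20 + 0x28 + 0x88 = 0xfb0 bytes, which the round-up
         * to a 0x80 boundary turns into 0x1000, so the mbuf pool base moves up
         * by 4 KB and the pool size shrinks by fw_len plus the extra 0xa00
         * reserve.
         */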
6089
6090         if (tp->dev->mtu <= ETH_DATA_LEN) {
6091                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6092                      tp->bufmgr_config.mbuf_read_dma_low_water);
6093                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6094                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6095                 tw32(BUFMGR_MB_HIGH_WATER,
6096                      tp->bufmgr_config.mbuf_high_water);
6097         } else {
6098                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6099                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6100                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6101                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6102                 tw32(BUFMGR_MB_HIGH_WATER,
6103                      tp->bufmgr_config.mbuf_high_water_jumbo);
6104         }
6105         tw32(BUFMGR_DMA_LOW_WATER,
6106              tp->bufmgr_config.dma_low_water);
6107         tw32(BUFMGR_DMA_HIGH_WATER,
6108              tp->bufmgr_config.dma_high_water);
6109
6110         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6111         for (i = 0; i < 2000; i++) {
6112                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6113                         break;
6114                 udelay(10);
6115         }
6116         if (i >= 2000) {
6117                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6118                        tp->dev->name);
6119                 return -ENODEV;
6120         }
6121
6122         /* Setup replenish threshold. */
6123         val = tp->rx_pending / 8;
6124         if (val == 0)
6125                 val = 1;
6126         else if (val > tp->rx_std_max_post)
6127                 val = tp->rx_std_max_post;
6128         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6129                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6130                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6131
6132                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6133                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6134         }
6135
6136         tw32(RCVBDI_STD_THRESH, val);
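        /* For example, a hypothetical rx_pending of 64 gives a replenish
         * threshold of 8 descriptors; on 5906 parts the value is further
         * capped at half of TG3_RX_INTERNAL_RING_SZ_5906.
         */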
6137
6138         /* Initialize TG3_BDINFO's at:
6139          *  RCVDBDI_STD_BD:     standard eth size rx ring
6140          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6141          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6142          *
6143          * like so:
6144          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6145          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6146          *                              ring attribute flags
6147          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6148          *
6149          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6150          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6151          *
6152          * The size of each ring is fixed in the firmware, but the location is
6153          * configurable.
6154          */
6155         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6156              ((u64) tp->rx_std_mapping >> 32));
6157         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6158              ((u64) tp->rx_std_mapping & 0xffffffff));
6159         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6160              NIC_SRAM_RX_BUFFER_DESC);
6161
6162         /* Don't even try to program the JUMBO/MINI buffer descriptor
6163          * configs on 5705.
6164          */
6165         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6166                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6167                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6168         } else {
6169                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6170                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6171
6172                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6173                      BDINFO_FLAGS_DISABLED);
6174
6175                 /* Setup replenish threshold. */
6176                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6177
6178                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6179                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6180                              ((u64) tp->rx_jumbo_mapping >> 32));
6181                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6182                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6183                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6184                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6185                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6186                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6187                 } else {
6188                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6189                              BDINFO_FLAGS_DISABLED);
6190                 }
6191
6192         }
6193
6194         /* There is only one send ring on 5705/5750, no need to explicitly
6195          * disable the others.
6196          */
6197         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6198                 /* Clear out send RCB ring in SRAM. */
6199                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6200                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6201                                       BDINFO_FLAGS_DISABLED);
6202         }
6203
6204         tp->tx_prod = 0;
6205         tp->tx_cons = 0;
6206         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6207         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6208
6209         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6210                        tp->tx_desc_mapping,
6211                        (TG3_TX_RING_SIZE <<
6212                         BDINFO_FLAGS_MAXLEN_SHIFT),
6213                        NIC_SRAM_TX_BUFFER_DESC);
6214
6215         /* There is only one receive return ring on 5705/5750, no need
6216          * to explicitly disable the others.
6217          */
6218         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6219                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6220                      i += TG3_BDINFO_SIZE) {
6221                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6222                                       BDINFO_FLAGS_DISABLED);
6223                 }
6224         }
6225
6226         tp->rx_rcb_ptr = 0;
6227         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6228
6229         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6230                        tp->rx_rcb_mapping,
6231                        (TG3_RX_RCB_RING_SIZE(tp) <<
6232                         BDINFO_FLAGS_MAXLEN_SHIFT),
6233                        0);
6234
6235         tp->rx_std_ptr = tp->rx_pending;
6236         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6237                      tp->rx_std_ptr);
6238
6239         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6240                                                 tp->rx_jumbo_pending : 0;
6241         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6242                      tp->rx_jumbo_ptr);
6243
6244         /* Initialize MAC address and backoff seed. */
6245         __tg3_set_mac_addr(tp);
6246
6247         /* MTU + ethernet header + FCS + optional VLAN tag */
6248         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
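        /* For a standard 1500-byte MTU this programs 1500 + 14 (ETH_HLEN) + 8
         * = 1522 bytes, the extra 8 covering the 4-byte FCS plus a 4-byte
         * 802.1Q tag.
         */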
6249
6250         /* The slot time is changed by tg3_setup_phy if we
6251          * run at gigabit with half duplex.
6252          */
6253         tw32(MAC_TX_LENGTHS,
6254              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6255              (6 << TX_LENGTHS_IPG_SHIFT) |
6256              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6257
6258         /* Receive rules. */
6259         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6260         tw32(RCVLPC_CONFIG, 0x0181);
6261
6262         /* Calculate RDMAC_MODE setting early, we need it to determine
6263          * the RCVLPC_STATE_ENABLE mask.
6264          */
6265         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6266                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6267                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6268                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6269                       RDMAC_MODE_LNGREAD_ENAB);
6270         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6271                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6272
6273         /* If statement applies to 5705 and 5750 PCI devices only */
6274         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6275              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6276             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6277                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6278                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6279                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6280                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6281                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6282                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6283                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6284                 }
6285         }
6286
6287         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6288                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6289
6290 #if TG3_TSO_SUPPORT != 0
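             /* Bit 27 has no named define here; on HW TSO capable chips it
              * appears to enable TSO handling in the read DMA engine.
              */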
6291         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6292                 rdmac_mode |= (1 << 27);
6293 #endif
6294
6295         /* Receive/send statistics. */
6296         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6297                 val = tr32(RCVLPC_STATS_ENABLE);
6298                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6299                 tw32(RCVLPC_STATS_ENABLE, val);
6300         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6301                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6302                 val = tr32(RCVLPC_STATS_ENABLE);
6303                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6304                 tw32(RCVLPC_STATS_ENABLE, val);
6305         } else {
6306                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6307         }
6308         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6309         tw32(SNDDATAI_STATSENAB, 0xffffff);
6310         tw32(SNDDATAI_STATSCTRL,
6311              (SNDDATAI_SCTRL_ENABLE |
6312               SNDDATAI_SCTRL_FASTUPD));
6313
6314         /* Setup host coalescing engine. */
6315         tw32(HOSTCC_MODE, 0);
6316         for (i = 0; i < 2000; i++) {
6317                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6318                         break;
6319                 udelay(10);
6320         }
6321
6322         __tg3_set_coalesce(tp, &tp->coal);
6323
6324         /* set status block DMA address */
6325         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6326              ((u64) tp->status_mapping >> 32));
6327         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6328              ((u64) tp->status_mapping & 0xffffffff));
6329
6330         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6331                 /* Status/statistics block address.  See tg3_timer,
6332                  * the tg3_periodic_fetch_stats call there, and
6333                  * tg3_get_stats to see how this works for 5705/5750 chips.
6334                  */
6335                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6336                      ((u64) tp->stats_mapping >> 32));
6337                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6338                      ((u64) tp->stats_mapping & 0xffffffff));
6339                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6340                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6341         }
6342
6343         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6344
6345         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6346         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6347         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6348                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6349
6350         /* Clear statistics/status block in chip, and status block in ram. */
6351         for (i = NIC_SRAM_STATS_BLK;
6352              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6353              i += sizeof(u32)) {
6354                 tg3_write_mem(tp, i, 0);
6355                 udelay(40);
6356         }
6357         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6358
6359         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6360                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6361                 /* reset to prevent losing 1st rx packet intermittently */
6362                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6363                 udelay(10);
6364         }
6365
6366         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6367                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6368         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6369         udelay(40);
6370
6371         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6372          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6373          * register to preserve the GPIO settings for LOMs. The GPIOs,
6374          * whether used as inputs or outputs, are set by boot code after
6375          * reset.
6376          */
6377         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6378                 u32 gpio_mask;
6379
6380                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6381                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6382
6383                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6384                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6385                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6386
6387                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6388                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6389
6390                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6391
6392                 /* GPIO1 must be driven high for eeprom write protect */
6393                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6394                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6395         }
6396         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6397         udelay(100);
6398
6399         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6400         tp->last_tag = 0;
6401
6402         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6403                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6404                 udelay(40);
6405         }
6406
6407         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6408                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6409                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6410                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6411                WDMAC_MODE_LNGREAD_ENAB);
6412
6413         /* If statement applies to 5705 and 5750 PCI devices only */
6414         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6415              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6416             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6417                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6418                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6419                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6420                         /* nothing */
6421                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6422                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6423                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6424                         val |= WDMAC_MODE_RX_ACCEL;
6425                 }
6426         }
6427
6428         /* Enable host coalescing bug fix */
6429         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6430             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6431                 val |= (1 << 29);
6432
6433         tw32_f(WDMAC_MODE, val);
6434         udelay(40);
6435
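             /* In PCI-X mode, raise the maximum burst size in the PCI-X
              * capability register, and on 5704 also program the number of
              * outstanding split transactions when split mode is in use.
              */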
6436         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6437                 val = tr32(TG3PCI_X_CAPS);
6438                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6439                         val &= ~PCIX_CAPS_BURST_MASK;
6440                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6441                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6442                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6443                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6444                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6445                                 val |= (tp->split_mode_max_reqs <<
6446                                         PCIX_CAPS_SPLIT_SHIFT);
6447                 }
6448                 tw32(TG3PCI_X_CAPS, val);
6449         }
6450
6451         tw32_f(RDMAC_MODE, rdmac_mode);
6452         udelay(40);
6453
6454         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6455         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6456                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6457         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6458         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6459         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6460         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6461         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6462 #if TG3_TSO_SUPPORT != 0
6463         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6464                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6465 #endif
6466         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6467         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6468
6469         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6470                 err = tg3_load_5701_a0_firmware_fix(tp);
6471                 if (err)
6472                         return err;
6473         }
6474
6475 #if TG3_TSO_SUPPORT != 0
6476         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6477                 err = tg3_load_tso_firmware(tp);
6478                 if (err)
6479                         return err;
6480         }
6481 #endif
6482
6483         tp->tx_mode = TX_MODE_ENABLE;
6484         tw32_f(MAC_TX_MODE, tp->tx_mode);
6485         udelay(100);
6486
6487         tp->rx_mode = RX_MODE_ENABLE;
6488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6489                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6490
6491         tw32_f(MAC_RX_MODE, tp->rx_mode);
6492         udelay(10);
6493
6494         if (tp->link_config.phy_is_low_power) {
6495                 tp->link_config.phy_is_low_power = 0;
6496                 tp->link_config.speed = tp->link_config.orig_speed;
6497                 tp->link_config.duplex = tp->link_config.orig_duplex;
6498                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6499         }
6500
6501         tp->mi_mode = MAC_MI_MODE_BASE;
6502         tw32_f(MAC_MI_MODE, tp->mi_mode);
6503         udelay(80);
6504
6505         tw32(MAC_LED_CTRL, tp->led_ctrl);
6506
6507         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6508         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6509                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6510                 udelay(10);
6511         }
6512         tw32_f(MAC_RX_MODE, tp->rx_mode);
6513         udelay(10);
6514
6515         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6516                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6517                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6518                         /* Set drive transmission level to 1.2V  */
6519                         /* only if the signal pre-emphasis bit is not set  */
6520                         val = tr32(MAC_SERDES_CFG);
6521                         val &= 0xfffff000;
6522                         val |= 0x880;
6523                         tw32(MAC_SERDES_CFG, val);
6524                 }
6525                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6526                         tw32(MAC_SERDES_CFG, 0x616000);
6527         }
6528
6529         /* Prevent chip from dropping frames when flow control
6530          * is enabled.
6531          */
6532         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6533
6534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6535             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6536                 /* Use hardware link auto-negotiation */
6537                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6538         }
6539
6540         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6541             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6542                 u32 tmp;
6543
6544                 tmp = tr32(SERDES_RX_CTRL);
6545                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6546                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6547                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6548                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6549         }
6550
6551         err = tg3_setup_phy(tp, reset_phy);
6552         if (err)
6553                 return err;
6554
6555         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6556                 u32 tmp;
6557
6558                 /* Clear CRC stats. */
6559                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6560                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6561                         tg3_readphy(tp, 0x14, &tmp);
6562                 }
6563         }
6564
6565         __tg3_set_rx_mode(tp->dev);
6566
6567         /* Initialize receive rules. */
6568         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6569         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6570         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6571         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6572
6573         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6574             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6575                 limit = 8;
6576         else
6577                 limit = 16;
6578         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6579                 limit -= 4;
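             /* Zero the unused rule/value slots from (limit - 1) down to 4;
              * the switch cases fall through deliberately.  Rules 0 and 1
              * were programmed above, and when ASF is enabled the top four
              * slots are skipped, presumably to preserve rules installed by
              * the firmware.
              */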
6580         switch (limit) {
6581         case 16:
6582                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6583         case 15:
6584                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6585         case 14:
6586                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6587         case 13:
6588                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6589         case 12:
6590                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6591         case 11:
6592                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6593         case 10:
6594                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6595         case 9:
6596                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6597         case 8:
6598                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6599         case 7:
6600                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6601         case 6:
6602                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6603         case 5:
6604                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6605         case 4:
6606                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6607         case 3:
6608                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6609         case 2:
6610         case 1:
6611
6612         default:
6613                 break;
6614         }
6615
6616         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6617
6618         return 0;
6619 }
6620
6621 /* Called at device open time to get the chip ready for
6622  * packet processing.  Invoked with tp->lock held.
6623  */
6624 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6625 {
6626         int err;
6627
6628         /* Force the chip into D0. */
6629         err = tg3_set_power_state(tp, PCI_D0);
6630         if (err)
6631                 goto out;
6632
6633         tg3_switch_clocks(tp);
6634
6635         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6636
6637         err = tg3_reset_hw(tp, reset_phy);
6638
6639 out:
6640         return err;
6641 }
6642
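     /* Fold a 32-bit hardware statistics register into a 64-bit software
      * counter, carrying into the high word when the low word wraps.
      */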
6643 #define TG3_STAT_ADD32(PSTAT, REG) \
6644 do {    u32 __val = tr32(REG); \
6645         (PSTAT)->low += __val; \
6646         if ((PSTAT)->low < __val) \
6647                 (PSTAT)->high += 1; \
6648 } while (0)
6649
6650 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6651 {
6652         struct tg3_hw_stats *sp = tp->hw_stats;
6653
6654         if (!netif_carrier_ok(tp->dev))
6655                 return;
6656
6657         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6658         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6659         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6660         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6661         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6662         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6663         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6664         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6665         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6666         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6667         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6668         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6669         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6670
6671         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6672         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6673         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6674         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6675         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6676         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6677         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6678         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6679         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6680         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6681         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6682         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6683         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6684         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6685
6686         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6687         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6688         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6689 }
6690
6691 static void tg3_timer(unsigned long __opaque)
6692 {
6693         struct tg3 *tp = (struct tg3 *) __opaque;
6694
6695         if (tp->irq_sync)
6696                 goto restart_timer;
6697
6698         spin_lock(&tp->lock);
6699
6700         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6701                 /* All of this garbage is because, when using non-tagged
6702                  * IRQ status, the mailbox/status_block protocol the chip
6703                  * uses with the CPU is race prone.
6704                  */
6705                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6706                         tw32(GRC_LOCAL_CTRL,
6707                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6708                 } else {
6709                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6710                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6711                 }
6712
6713                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6714                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6715                         spin_unlock(&tp->lock);
6716                         schedule_work(&tp->reset_task);
6717                         return;
6718                 }
6719         }
6720
6721         /* This part only runs once per second. */
6722         if (!--tp->timer_counter) {
6723                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6724                         tg3_periodic_fetch_stats(tp);
6725
6726                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6727                         u32 mac_stat;
6728                         int phy_event;
6729
6730                         mac_stat = tr32(MAC_STATUS);
6731
6732                         phy_event = 0;
6733                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6734                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6735                                         phy_event = 1;
6736                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6737                                 phy_event = 1;
6738
6739                         if (phy_event)
6740                                 tg3_setup_phy(tp, 0);
6741                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6742                         u32 mac_stat = tr32(MAC_STATUS);
6743                         int need_setup = 0;
6744
6745                         if (netif_carrier_ok(tp->dev) &&
6746                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6747                                 need_setup = 1;
6748                         }
6749                         if (!netif_carrier_ok(tp->dev) &&
6750                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6751                                          MAC_STATUS_SIGNAL_DET))) {
6752                                 need_setup = 1;
6753                         }
6754                         if (need_setup) {
6755                                 if (!tp->serdes_counter) {
6756                                         tw32_f(MAC_MODE,
6757                                              (tp->mac_mode &
6758                                               ~MAC_MODE_PORT_MODE_MASK));
6759                                         udelay(40);
6760                                         tw32_f(MAC_MODE, tp->mac_mode);
6761                                         udelay(40);
6762                                 }
6763                                 tg3_setup_phy(tp, 0);
6764                         }
6765                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6766                         tg3_serdes_parallel_detect(tp);
6767
6768                 tp->timer_counter = tp->timer_multiplier;
6769         }
6770
6771         /* Heartbeat is only sent once every 2 seconds.
6772          *
6773          * The heartbeat is to tell the ASF firmware that the host
6774          * driver is still alive.  In the event that the OS crashes,
6775          * ASF needs to reset the hardware to free up the FIFO space
6776          * that may be filled with rx packets destined for the host.
6777          * If the FIFO is full, ASF will no longer function properly.
6778          *
6779          * Unintended resets have been reported on real-time kernels
6780          * where the timer doesn't run on time.  Netpoll will have the
6781          * same problem.
6782          *
6783          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
6784          * to check the ring condition when the heartbeat is expiring
6785          * before doing the reset.  This will prevent most unintended
6786          * resets.
6787          */
6788         if (!--tp->asf_counter) {
6789                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6790                         u32 val;
6791
6792                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6793                                       FWCMD_NICDRV_ALIVE3);
6794                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6795                         /* 5 seconds timeout */
6796                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
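                             /* Bit 14 of GRC_RX_CPU_EVENT appears to be the
                              * "driver event" strobe that tells the firmware
                              * a command is waiting in the mailbox above.
                              */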
6797                         val = tr32(GRC_RX_CPU_EVENT);
6798                         val |= (1 << 14);
6799                         tw32(GRC_RX_CPU_EVENT, val);
6800                 }
6801                 tp->asf_counter = tp->asf_multiplier;
6802         }
6803
6804         spin_unlock(&tp->lock);
6805
6806 restart_timer:
6807         tp->timer.expires = jiffies + tp->timer_offset;
6808         add_timer(&tp->timer);
6809 }
6810
6811 static int tg3_request_irq(struct tg3 *tp)
6812 {
6813         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6814         unsigned long flags;
6815         struct net_device *dev = tp->dev;
6816
6817         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6818                 fn = tg3_msi;
6819                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6820                         fn = tg3_msi_1shot;
6821                 flags = IRQF_SAMPLE_RANDOM;
6822         } else {
6823                 fn = tg3_interrupt;
6824                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6825                         fn = tg3_interrupt_tagged;
6826                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6827         }
6828         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6829 }
6830
6831 static int tg3_test_interrupt(struct tg3 *tp)
6832 {
6833         struct net_device *dev = tp->dev;
6834         int err, i;
6835         u32 int_mbox = 0;
6836
6837         if (!netif_running(dev))
6838                 return -ENODEV;
6839
6840         tg3_disable_ints(tp);
6841
6842         free_irq(tp->pdev->irq, dev);
6843
6844         err = request_irq(tp->pdev->irq, tg3_test_isr,
6845                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6846         if (err)
6847                 return err;
6848
6849         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6850         tg3_enable_ints(tp);
6851
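             /* Force the coalescing engine to raise an interrupt immediately,
              * then poll the interrupt mailbox for up to ~50 ms to see
              * whether the test ISR ran.
              */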
6852         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6853                HOSTCC_MODE_NOW);
6854
6855         for (i = 0; i < 5; i++) {
6856                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6857                                         TG3_64BIT_REG_LOW);
6858                 if (int_mbox != 0)
6859                         break;
6860                 msleep(10);
6861         }
6862
6863         tg3_disable_ints(tp);
6864
6865         free_irq(tp->pdev->irq, dev);
6866
6867         err = tg3_request_irq(tp);
6868
6869         if (err)
6870                 return err;
6871
6872         if (int_mbox != 0)
6873                 return 0;
6874
6875         return -EIO;
6876 }
6877
6878 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6879  * INTx mode is successfully restored.
6880  */
6881 static int tg3_test_msi(struct tg3 *tp)
6882 {
6883         struct net_device *dev = tp->dev;
6884         int err;
6885         u16 pci_cmd;
6886
6887         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6888                 return 0;
6889
6890         /* Turn off SERR reporting in case MSI terminates with Master
6891          * Abort.
6892          */
6893         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6894         pci_write_config_word(tp->pdev, PCI_COMMAND,
6895                               pci_cmd & ~PCI_COMMAND_SERR);
6896
6897         err = tg3_test_interrupt(tp);
6898
6899         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6900
6901         if (!err)
6902                 return 0;
6903
6904         /* other failures */
6905         if (err != -EIO)
6906                 return err;
6907
6908         /* MSI test failed, go back to INTx mode */
6909         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6910                "switching to INTx mode. Please report this failure to "
6911                "the PCI maintainer and include system chipset information.\n",
6912                        tp->dev->name);
6913
6914         free_irq(tp->pdev->irq, dev);
6915         pci_disable_msi(tp->pdev);
6916
6917         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6918
6919         err = tg3_request_irq(tp);
6920         if (err)
6921                 return err;
6922
6923         /* Need to reset the chip because the MSI cycle may have terminated
6924          * with Master Abort.
6925          */
6926         tg3_full_lock(tp, 1);
6927
6928         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6929         err = tg3_init_hw(tp, 1);
6930
6931         tg3_full_unlock(tp);
6932
6933         if (err)
6934                 free_irq(tp->pdev->irq, dev);
6935
6936         return err;
6937 }
6938
6939 static int tg3_open(struct net_device *dev)
6940 {
6941         struct tg3 *tp = netdev_priv(dev);
6942         int err;
6943
6944         tg3_full_lock(tp, 0);
6945
6946         err = tg3_set_power_state(tp, PCI_D0);
6947         if (err) {
                     tg3_full_unlock(tp);
6948                 return err;
             }
6949
6950         tg3_disable_ints(tp);
6951         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6952
6953         tg3_full_unlock(tp);
6954
6955         /* The placement of this call is tied
6956          * to the setup and use of Host TX descriptors.
6957          */
6958         err = tg3_alloc_consistent(tp);
6959         if (err)
6960                 return err;
6961
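             /* Try MSI only on 5750-and-later chips, skipping the 5750 A and
              * B steppings and 5714 devices that have no peer port.
              */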
6962         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6963             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6964             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6965             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6966               (tp->pdev_peer == tp->pdev))) {
6967                 /* All MSI supporting chips should support tagged
6968                  * status.  Assert that this is the case.
6969                  */
6970                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6971                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6972                                "Not using MSI.\n", tp->dev->name);
6973                 } else if (pci_enable_msi(tp->pdev) == 0) {
6974                         u32 msi_mode;
6975
6976                         msi_mode = tr32(MSGINT_MODE);
6977                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6978                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6979                 }
6980         }
6981         err = tg3_request_irq(tp);
6982
6983         if (err) {
6984                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6985                         pci_disable_msi(tp->pdev);
6986                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6987                 }
6988                 tg3_free_consistent(tp);
6989                 return err;
6990         }
6991
6992         tg3_full_lock(tp, 0);
6993
6994         err = tg3_init_hw(tp, 1);
6995         if (err) {
6996                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6997                 tg3_free_rings(tp);
6998         } else {
6999                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7000                         tp->timer_offset = HZ;
7001                 else
7002                         tp->timer_offset = HZ / 10;
7003
7004                 BUG_ON(tp->timer_offset > HZ);
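                     /* tg3_timer() fires every timer_offset jiffies; the
                      * counters below scale that so the once-per-second work
                      * runs every second and the ASF heartbeat every two.
                      */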
7005                 tp->timer_counter = tp->timer_multiplier =
7006                         (HZ / tp->timer_offset);
7007                 tp->asf_counter = tp->asf_multiplier =
7008                         ((HZ / tp->timer_offset) * 2);
7009
7010                 init_timer(&tp->timer);
7011                 tp->timer.expires = jiffies + tp->timer_offset;
7012                 tp->timer.data = (unsigned long) tp;
7013                 tp->timer.function = tg3_timer;
7014         }
7015
7016         tg3_full_unlock(tp);
7017
7018         if (err) {
7019                 free_irq(tp->pdev->irq, dev);
7020                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7021                         pci_disable_msi(tp->pdev);
7022                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7023                 }
7024                 tg3_free_consistent(tp);
7025                 return err;
7026         }
7027
7028         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7029                 err = tg3_test_msi(tp);
7030
7031                 if (err) {
7032                         tg3_full_lock(tp, 0);
7033
7034                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7035                                 pci_disable_msi(tp->pdev);
7036                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7037                         }
7038                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7039                         tg3_free_rings(tp);
7040                         tg3_free_consistent(tp);
7041
7042                         tg3_full_unlock(tp);
7043
7044                         return err;
7045                 }
7046
7047                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7048                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7049                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
7050
7051                                 tw32(PCIE_TRANSACTION_CFG,
7052                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
7053                         }
7054                 }
7055         }
7056
7057         tg3_full_lock(tp, 0);
7058
7059         add_timer(&tp->timer);
7060         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7061         tg3_enable_ints(tp);
7062
7063         tg3_full_unlock(tp);
7064
7065         netif_start_queue(dev);
7066
7067         return 0;
7068 }
7069
7070 #if 0
7071 /*static*/ void tg3_dump_state(struct tg3 *tp)
7072 {
7073         u32 val32, val32_2, val32_3, val32_4, val32_5;
7074         u16 val16;
7075         int i;
7076
7077         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7078         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7079         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7080                val16, val32);
7081
7082         /* MAC block */
7083         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7084                tr32(MAC_MODE), tr32(MAC_STATUS));
7085         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7086                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7087         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7088                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7089         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7090                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7091
7092         /* Send data initiator control block */
7093         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7094                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7095         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7096                tr32(SNDDATAI_STATSCTRL));
7097
7098         /* Send data completion control block */
7099         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7100
7101         /* Send BD ring selector block */
7102         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7103                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7104
7105         /* Send BD initiator control block */
7106         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7107                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7108
7109         /* Send BD completion control block */
7110         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7111
7112         /* Receive list placement control block */
7113         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7114                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7115         printk("       RCVLPC_STATSCTRL[%08x]\n",
7116                tr32(RCVLPC_STATSCTRL));
7117
7118         /* Receive data and receive BD initiator control block */
7119         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7120                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7121
7122         /* Receive data completion control block */
7123         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7124                tr32(RCVDCC_MODE));
7125
7126         /* Receive BD initiator control block */
7127         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7128                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7129
7130         /* Receive BD completion control block */
7131         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7132                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7133
7134         /* Receive list selector control block */
7135         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7136                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7137
7138         /* Mbuf cluster free block */
7139         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7140                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7141
7142         /* Host coalescing control block */
7143         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7144                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7145         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7146                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7147                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7148         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7149                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7150                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7151         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7152                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7153         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7154                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7155
7156         /* Memory arbiter control block */
7157         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7158                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7159
7160         /* Buffer manager control block */
7161         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7162                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7163         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7164                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7165         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7166                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7167                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7168                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7169
7170         /* Read DMA control block */
7171         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7172                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7173
7174         /* Write DMA control block */
7175         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7176                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7177
7178         /* DMA completion block */
7179         printk("DEBUG: DMAC_MODE[%08x]\n",
7180                tr32(DMAC_MODE));
7181
7182         /* GRC block */
7183         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7184                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7185         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7186                tr32(GRC_LOCAL_CTRL));
7187
7188         /* TG3_BDINFOs */
7189         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7190                tr32(RCVDBDI_JUMBO_BD + 0x0),
7191                tr32(RCVDBDI_JUMBO_BD + 0x4),
7192                tr32(RCVDBDI_JUMBO_BD + 0x8),
7193                tr32(RCVDBDI_JUMBO_BD + 0xc));
7194         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7195                tr32(RCVDBDI_STD_BD + 0x0),
7196                tr32(RCVDBDI_STD_BD + 0x4),
7197                tr32(RCVDBDI_STD_BD + 0x8),
7198                tr32(RCVDBDI_STD_BD + 0xc));
7199         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7200                tr32(RCVDBDI_MINI_BD + 0x0),
7201                tr32(RCVDBDI_MINI_BD + 0x4),
7202                tr32(RCVDBDI_MINI_BD + 0x8),
7203                tr32(RCVDBDI_MINI_BD + 0xc));
7204
7205         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7206         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7207         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7208         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7209         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7210                val32, val32_2, val32_3, val32_4);
7211
7212         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7213         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7214         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7215         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7216         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7217                val32, val32_2, val32_3, val32_4);
7218
7219         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7220         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7221         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7222         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7223         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7224         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7225                val32, val32_2, val32_3, val32_4, val32_5);
7226
7227         /* SW status block */
7228         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7229                tp->hw_status->status,
7230                tp->hw_status->status_tag,
7231                tp->hw_status->rx_jumbo_consumer,
7232                tp->hw_status->rx_consumer,
7233                tp->hw_status->rx_mini_consumer,
7234                tp->hw_status->idx[0].rx_producer,
7235                tp->hw_status->idx[0].tx_consumer);
7236
7237         /* SW statistics block */
7238         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7239                ((u32 *)tp->hw_stats)[0],
7240                ((u32 *)tp->hw_stats)[1],
7241                ((u32 *)tp->hw_stats)[2],
7242                ((u32 *)tp->hw_stats)[3]);
7243
7244         /* Mailboxes */
7245         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7246                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7247                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7248                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7249                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7250
7251         /* NIC side send descriptors. */
7252         for (i = 0; i < 6; i++) {
7253                 unsigned long txd;
7254
7255                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7256                         + (i * sizeof(struct tg3_tx_buffer_desc));
7257                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7258                        i,
7259                        readl(txd + 0x0), readl(txd + 0x4),
7260                        readl(txd + 0x8), readl(txd + 0xc));
7261         }
7262
7263         /* NIC side RX descriptors. */
7264         for (i = 0; i < 6; i++) {
7265                 unsigned long rxd;
7266
7267                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7268                         + (i * sizeof(struct tg3_rx_buffer_desc));
7269                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7270                        i,
7271                        readl(rxd + 0x0), readl(rxd + 0x4),
7272                        readl(rxd + 0x8), readl(rxd + 0xc));
7273                 rxd += (4 * sizeof(u32));
7274                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7275                        i,
7276                        readl(rxd + 0x0), readl(rxd + 0x4),
7277                        readl(rxd + 0x8), readl(rxd + 0xc));
7278         }
7279
7280         for (i = 0; i < 6; i++) {
7281                 unsigned long rxd;
7282
7283                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7284                         + (i * sizeof(struct tg3_rx_buffer_desc));
7285                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7286                        i,
7287                        readl(rxd + 0x0), readl(rxd + 0x4),
7288                        readl(rxd + 0x8), readl(rxd + 0xc));
7289                 rxd += (4 * sizeof(u32));
7290                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7291                        i,
7292                        readl(rxd + 0x0), readl(rxd + 0x4),
7293                        readl(rxd + 0x8), readl(rxd + 0xc));
7294         }
7295 }
7296 #endif
7297
7298 static struct net_device_stats *tg3_get_stats(struct net_device *);
7299 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7300
7301 static int tg3_close(struct net_device *dev)
7302 {
7303         struct tg3 *tp = netdev_priv(dev);
7304
7305         /* Calling flush_scheduled_work() may deadlock because
7306          * linkwatch_event() may be on the workqueue and it will try to
7307          * get the rtnl_lock we are holding; poll for the reset task instead.
7308          */
7309         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7310                 msleep(1);
7311
7312         netif_stop_queue(dev);
7313
7314         del_timer_sync(&tp->timer);
7315
7316         tg3_full_lock(tp, 1);
7317 #if 0
7318         tg3_dump_state(tp);
7319 #endif
7320
7321         tg3_disable_ints(tp);
7322
7323         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7324         tg3_free_rings(tp);
7325         tp->tg3_flags &=
7326                 ~(TG3_FLAG_INIT_COMPLETE |
7327                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7328
7329         tg3_full_unlock(tp);
7330
7331         free_irq(tp->pdev->irq, dev);
7332         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7333                 pci_disable_msi(tp->pdev);
7334                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7335         }
7336
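             /* Snapshot the running totals before the hardware statistics
              * memory is freed, so counters keep accumulating across
              * close/open cycles.
              */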
7337         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7338                sizeof(tp->net_stats_prev));
7339         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7340                sizeof(tp->estats_prev));
7341
7342         tg3_free_consistent(tp);
7343
7344         tg3_set_power_state(tp, PCI_D3hot);
7345
7346         netif_carrier_off(tp->dev);
7347
7348         return 0;
7349 }
7350
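     /* Return a 64-bit hardware counter as an unsigned long; on 32-bit
      * hosts only the low word fits, so the high word is dropped.
      */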
7351 static inline unsigned long get_stat64(tg3_stat64_t *val)
7352 {
7353         unsigned long ret;
7354
7355 #if (BITS_PER_LONG == 32)
7356         ret = val->low;
7357 #else
7358         ret = ((u64)val->high << 32) | ((u64)val->low);
7359 #endif
7360         return ret;
7361 }
7362
7363 static unsigned long calc_crc_errors(struct tg3 *tp)
7364 {
7365         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7366
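             /* 5700/5701 copper parts keep the CRC error count in a PHY
              * counter (read via PHY registers 0x1e/0x14) rather than in the
              * MAC statistics block, so accumulate it separately here.
              */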
7367         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7368             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7369              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7370                 u32 val;
7371
7372                 spin_lock_bh(&tp->lock);
7373                 if (!tg3_readphy(tp, 0x1e, &val)) {
7374                         tg3_writephy(tp, 0x1e, val | 0x8000);
7375                         tg3_readphy(tp, 0x14, &val);
7376                 } else
7377                         val = 0;
7378                 spin_unlock_bh(&tp->lock);
7379
7380                 tp->phy_crc_errors += val;
7381
7382                 return tp->phy_crc_errors;
7383         }
7384
7385         return get_stat64(&hw_stats->rx_fcs_errors);
7386 }
7387
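     /* Ethtool stats are reported as the totals saved at the last close
      * (tp->estats_prev) plus whatever the hardware has counted since the
      * last reset; ESTAT_ADD() folds the two together.
      */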
7388 #define ESTAT_ADD(member) \
7389         estats->member =        old_estats->member + \
7390                                 get_stat64(&hw_stats->member)
7391
7392 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7393 {
7394         struct tg3_ethtool_stats *estats = &tp->estats;
7395         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7396         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7397
7398         if (!hw_stats)
7399                 return old_estats;
7400
7401         ESTAT_ADD(rx_octets);
7402         ESTAT_ADD(rx_fragments);
7403         ESTAT_ADD(rx_ucast_packets);
7404         ESTAT_ADD(rx_mcast_packets);
7405         ESTAT_ADD(rx_bcast_packets);
7406         ESTAT_ADD(rx_fcs_errors);
7407         ESTAT_ADD(rx_align_errors);
7408         ESTAT_ADD(rx_xon_pause_rcvd);
7409         ESTAT_ADD(rx_xoff_pause_rcvd);
7410         ESTAT_ADD(rx_mac_ctrl_rcvd);
7411         ESTAT_ADD(rx_xoff_entered);
7412         ESTAT_ADD(rx_frame_too_long_errors);
7413         ESTAT_ADD(rx_jabbers);
7414         ESTAT_ADD(rx_undersize_packets);
7415         ESTAT_ADD(rx_in_length_errors);
7416         ESTAT_ADD(rx_out_length_errors);
7417         ESTAT_ADD(rx_64_or_less_octet_packets);
7418         ESTAT_ADD(rx_65_to_127_octet_packets);
7419         ESTAT_ADD(rx_128_to_255_octet_packets);
7420         ESTAT_ADD(rx_256_to_511_octet_packets);
7421         ESTAT_ADD(rx_512_to_1023_octet_packets);
7422         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7423         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7424         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7425         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7426         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7427
7428         ESTAT_ADD(tx_octets);
7429         ESTAT_ADD(tx_collisions);
7430         ESTAT_ADD(tx_xon_sent);
7431         ESTAT_ADD(tx_xoff_sent);
7432         ESTAT_ADD(tx_flow_control);
7433         ESTAT_ADD(tx_mac_errors);
7434         ESTAT_ADD(tx_single_collisions);
7435         ESTAT_ADD(tx_mult_collisions);
7436         ESTAT_ADD(tx_deferred);
7437         ESTAT_ADD(tx_excessive_collisions);
7438         ESTAT_ADD(tx_late_collisions);
7439         ESTAT_ADD(tx_collide_2times);
7440         ESTAT_ADD(tx_collide_3times);
7441         ESTAT_ADD(tx_collide_4times);
7442         ESTAT_ADD(tx_collide_5times);
7443         ESTAT_ADD(tx_collide_6times);
7444         ESTAT_ADD(tx_collide_7times);
7445         ESTAT_ADD(tx_collide_8times);
7446         ESTAT_ADD(tx_collide_9times);
7447         ESTAT_ADD(tx_collide_10times);
7448         ESTAT_ADD(tx_collide_11times);
7449         ESTAT_ADD(tx_collide_12times);
7450         ESTAT_ADD(tx_collide_13times);
7451         ESTAT_ADD(tx_collide_14times);
7452         ESTAT_ADD(tx_collide_15times);
7453         ESTAT_ADD(tx_ucast_packets);
7454         ESTAT_ADD(tx_mcast_packets);
7455         ESTAT_ADD(tx_bcast_packets);
7456         ESTAT_ADD(tx_carrier_sense_errors);
7457         ESTAT_ADD(tx_discards);
7458         ESTAT_ADD(tx_errors);
7459
7460         ESTAT_ADD(dma_writeq_full);
7461         ESTAT_ADD(dma_write_prioq_full);
7462         ESTAT_ADD(rxbds_empty);
7463         ESTAT_ADD(rx_discards);
7464         ESTAT_ADD(rx_errors);
7465         ESTAT_ADD(rx_threshold_hit);
7466
7467         ESTAT_ADD(dma_readq_full);
7468         ESTAT_ADD(dma_read_prioq_full);
7469         ESTAT_ADD(tx_comp_queue_full);
7470
7471         ESTAT_ADD(ring_set_send_prod_index);
7472         ESTAT_ADD(ring_status_update);
7473         ESTAT_ADD(nic_irqs);
7474         ESTAT_ADD(nic_avoided_irqs);
7475         ESTAT_ADD(nic_tx_threshold_hit);
7476
7477         return estats;
7478 }
7479
7480 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7481 {
7482         struct tg3 *tp = netdev_priv(dev);
7483         struct net_device_stats *stats = &tp->net_stats;
7484         struct net_device_stats *old_stats = &tp->net_stats_prev;
7485         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7486
7487         if (!hw_stats)
7488                 return old_stats;
7489
7490         stats->rx_packets = old_stats->rx_packets +
7491                 get_stat64(&hw_stats->rx_ucast_packets) +
7492                 get_stat64(&hw_stats->rx_mcast_packets) +
7493                 get_stat64(&hw_stats->rx_bcast_packets);
7494
7495         stats->tx_packets = old_stats->tx_packets +
7496                 get_stat64(&hw_stats->tx_ucast_packets) +
7497                 get_stat64(&hw_stats->tx_mcast_packets) +
7498                 get_stat64(&hw_stats->tx_bcast_packets);
7499
7500         stats->rx_bytes = old_stats->rx_bytes +
7501                 get_stat64(&hw_stats->rx_octets);
7502         stats->tx_bytes = old_stats->tx_bytes +
7503                 get_stat64(&hw_stats->tx_octets);
7504
7505         stats->rx_errors = old_stats->rx_errors +
7506                 get_stat64(&hw_stats->rx_errors);
7507         stats->tx_errors = old_stats->tx_errors +
7508                 get_stat64(&hw_stats->tx_errors) +
7509                 get_stat64(&hw_stats->tx_mac_errors) +
7510                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7511                 get_stat64(&hw_stats->tx_discards);
7512
7513         stats->multicast = old_stats->multicast +
7514                 get_stat64(&hw_stats->rx_mcast_packets);
7515         stats->collisions = old_stats->collisions +
7516                 get_stat64(&hw_stats->tx_collisions);
7517
7518         stats->rx_length_errors = old_stats->rx_length_errors +
7519                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7520                 get_stat64(&hw_stats->rx_undersize_packets);
7521
7522         stats->rx_over_errors = old_stats->rx_over_errors +
7523                 get_stat64(&hw_stats->rxbds_empty);
7524         stats->rx_frame_errors = old_stats->rx_frame_errors +
7525                 get_stat64(&hw_stats->rx_align_errors);
7526         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7527                 get_stat64(&hw_stats->tx_discards);
7528         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7529                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7530
7531         stats->rx_crc_errors = old_stats->rx_crc_errors +
7532                 calc_crc_errors(tp);
7533
7534         stats->rx_missed_errors = old_stats->rx_missed_errors +
7535                 get_stat64(&hw_stats->rx_discards);
7536
7537         return stats;
7538 }
7539
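     /* Bit-serial CRC-32 over 'len' bytes using the reflected Ethernet
      * polynomial 0xedb88320; used below to hash multicast addresses into
      * the MAC_HASH_REG_* filter.
      */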
7540 static inline u32 calc_crc(unsigned char *buf, int len)
7541 {
7542         u32 reg;
7543         u32 tmp;
7544         int j, k;
7545
7546         reg = 0xffffffff;
7547
7548         for (j = 0; j < len; j++) {
7549                 reg ^= buf[j];
7550
7551                 for (k = 0; k < 8; k++) {
7552                         tmp = reg & 0x01;
7553
7554                         reg >>= 1;
7555
7556                         if (tmp) {
7557                                 reg ^= 0xedb88320;
7558                         }
7559                 }
7560         }
7561
7562         return ~reg;
7563 }
7564
7565 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7566 {
7567         /* accept or reject all multicast frames */
7568         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7569         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7570         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7571         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7572 }
7573
7574 static void __tg3_set_rx_mode(struct net_device *dev)
7575 {
7576         struct tg3 *tp = netdev_priv(dev);
7577         u32 rx_mode;
7578
7579         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7580                                   RX_MODE_KEEP_VLAN_TAG);
7581
7582         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7583          * flag clear.
7584          */
7585 #if TG3_VLAN_TAG_USED
7586         if (!tp->vlgrp &&
7587             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7588                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7589 #else
7590         /* By definition, VLAN is always disabled in this
7591          * case.
7592          */
7593         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7594                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7595 #endif
7596
7597         if (dev->flags & IFF_PROMISC) {
7598                 /* Promiscuous mode. */
7599                 rx_mode |= RX_MODE_PROMISC;
7600         } else if (dev->flags & IFF_ALLMULTI) {
7601                 /* Accept all multicast. */
7602                 tg3_set_multi (tp, 1);
7603         } else if (dev->mc_count < 1) {
7604                 /* Reject all multicast. */
7605                 tg3_set_multi (tp, 0);
7606         } else {
7607                 /* Accept one or more multicast(s). */
7608                 struct dev_mc_list *mclist;
7609                 unsigned int i;
7610                 u32 mc_filter[4] = { 0, };
7611                 u32 regidx;
7612                 u32 bit;
7613                 u32 crc;
7614
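                     /* Hash each address into one of 128 filter bits spread
                      * across the four MAC_HASH_REG_* registers, using the
                      * low 7 bits of the complemented CRC.
                      */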
7615                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7616                      i++, mclist = mclist->next) {
7617
7618                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7619                         bit = ~crc & 0x7f;
7620                         regidx = (bit & 0x60) >> 5;
7621                         bit &= 0x1f;
7622                         mc_filter[regidx] |= (1 << bit);
7623                 }
7624
7625                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7626                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7627                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7628                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7629         }
7630
7631         if (rx_mode != tp->rx_mode) {
7632                 tp->rx_mode = rx_mode;
7633                 tw32_f(MAC_RX_MODE, rx_mode);
7634                 udelay(10);
7635         }
7636 }
7637
7638 static void tg3_set_rx_mode(struct net_device *dev)
7639 {
7640         struct tg3 *tp = netdev_priv(dev);
7641
7642         if (!netif_running(dev))
7643                 return;
7644
7645         tg3_full_lock(tp, 0);
7646         __tg3_set_rx_mode(dev);
7647         tg3_full_unlock(tp);
7648 }
7649
7650 #define TG3_REGDUMP_LEN         (32 * 1024)
7651
7652 static int tg3_get_regs_len(struct net_device *dev)
7653 {
7654         return TG3_REGDUMP_LEN;
7655 }
7656
7657 static void tg3_get_regs(struct net_device *dev,
7658                 struct ethtool_regs *regs, void *_p)
7659 {
7660         u32 *p = _p;
7661         struct tg3 *tp = netdev_priv(dev);
7662         u8 *orig_p = _p;
7663         int i;
7664
7665         regs->version = 0;
7666
7667         memset(p, 0, TG3_REGDUMP_LEN);
7668
7669         if (tp->link_config.phy_is_low_power)
7670                 return;
7671
7672         tg3_full_lock(tp, 0);
7673
7674 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7675 #define GET_REG32_LOOP(base,len)                \
7676 do {    p = (u32 *)(orig_p + (base));           \
7677         for (i = 0; i < len; i += 4)            \
7678                 __GET_REG32((base) + i);        \
7679 } while (0)
7680 #define GET_REG32_1(reg)                        \
7681 do {    p = (u32 *)(orig_p + (reg));            \
7682         __GET_REG32((reg));                     \
7683 } while (0)
7684
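             /* Copy selected register windows into the dump buffer; each
              * block lands at the offset matching its register address, so
              * regions that are not read stay zero from the memset() above.
              */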
7685         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7686         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7687         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7688         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7689         GET_REG32_1(SNDDATAC_MODE);
7690         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7691         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7692         GET_REG32_1(SNDBDC_MODE);
7693         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7694         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7695         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7696         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7697         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7698         GET_REG32_1(RCVDCC_MODE);
7699         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7700         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7701         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7702         GET_REG32_1(MBFREE_MODE);
7703         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7704         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7705         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7706         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7707         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7708         GET_REG32_1(RX_CPU_MODE);
7709         GET_REG32_1(RX_CPU_STATE);
7710         GET_REG32_1(RX_CPU_PGMCTR);
7711         GET_REG32_1(RX_CPU_HWBKPT);
7712         GET_REG32_1(TX_CPU_MODE);
7713         GET_REG32_1(TX_CPU_STATE);
7714         GET_REG32_1(TX_CPU_PGMCTR);
7715         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7716         GET_REG32_LOOP(FTQ_RESET, 0x120);
7717         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7718         GET_REG32_1(DMAC_MODE);
7719         GET_REG32_LOOP(GRC_MODE, 0x4c);
7720         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7721                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7722
7723 #undef __GET_REG32
7724 #undef GET_REG32_LOOP
7725 #undef GET_REG32_1
7726
7727         tg3_full_unlock(tp);
7728 }
7729
7730 static int tg3_get_eeprom_len(struct net_device *dev)
7731 {
7732         struct tg3 *tp = netdev_priv(dev);
7733
7734         return tp->nvram_size;
7735 }
7736
7737 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7738 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7739
7740 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7741 {
7742         struct tg3 *tp = netdev_priv(dev);
7743         int ret;
7744         u8  *pd;
7745         u32 i, offset, len, val, b_offset, b_count;
7746
7747         if (tp->link_config.phy_is_low_power)
7748                 return -EAGAIN;
7749
7750         offset = eeprom->offset;
7751         len = eeprom->len;
7752         eeprom->len = 0;
7753
7754         eeprom->magic = TG3_EEPROM_MAGIC;
7755
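             /* NVRAM is read one 32-bit word at a time, so split the request
              * into an unaligned leading fragment, whole aligned words, and
              * an unaligned trailing fragment.
              */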
7756         if (offset & 3) {
7757                 /* adjustments to start on required 4 byte boundary */
7758                 b_offset = offset & 3;
7759                 b_count = 4 - b_offset;
7760                 if (b_count > len) {
7761                         /* e.g. offset=1, len=2: request ends inside this word */
7762                         b_count = len;
7763                 }
7764                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7765                 if (ret)
7766                         return ret;
7767                 val = cpu_to_le32(val);
7768                 memcpy(data, ((char*)&val) + b_offset, b_count);
7769                 len -= b_count;
7770                 offset += b_count;
7771                 eeprom->len += b_count;
7772         }
7773
7774         /* read bytes up to the last 4 byte boundary */
7775         pd = &data[eeprom->len];
7776         for (i = 0; i < (len - (len & 3)); i += 4) {
7777                 ret = tg3_nvram_read(tp, offset + i, &val);
7778                 if (ret) {
7779                         eeprom->len += i;
7780                         return ret;
7781                 }
7782                 val = cpu_to_le32(val);
7783                 memcpy(pd + i, &val, 4);
7784         }
7785         eeprom->len += i;
7786
7787         if (len & 3) {
7788                 /* read last bytes not ending on 4 byte boundary */
7789                 pd = &data[eeprom->len];
7790                 b_count = len & 3;
7791                 b_offset = offset + len - b_count;
7792                 ret = tg3_nvram_read(tp, b_offset, &val);
7793                 if (ret)
7794                         return ret;
7795                 val = cpu_to_le32(val);
7796                 memcpy(pd, ((char*)&val), b_count);
7797                 eeprom->len += b_count;
7798         }
7799         return 0;
7800 }
7801
7802 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7803
7804 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7805 {
7806         struct tg3 *tp = netdev_priv(dev);
7807         int ret;
7808         u32 offset, len, b_offset, odd_len, start, end;
7809         u8 *buf;
7810
7811         if (tp->link_config.phy_is_low_power)
7812                 return -EAGAIN;
7813
7814         if (eeprom->magic != TG3_EEPROM_MAGIC)
7815                 return -EINVAL;
7816
7817         offset = eeprom->offset;
7818         len = eeprom->len;
7819
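             /* NVRAM writes are word based as well: read back the partial
              * word at either unaligned end so untouched bytes are preserved,
              * then write a single aligned block below.
              */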
7820         if ((b_offset = (offset & 3))) {
7821                 /* adjustments to start on required 4 byte boundary */
7822                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7823                 if (ret)
7824                         return ret;
7825                 start = cpu_to_le32(start);
7826                 len += b_offset;
7827                 offset &= ~3;
7828                 if (len < 4)
7829                         len = 4;
7830         }
7831
7832         odd_len = 0;
7833         if (len & 3) {
7834                 /* adjustments to end on required 4 byte boundary */
7835                 odd_len = 1;
7836                 len = (len + 3) & ~3;
7837                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7838                 if (ret)
7839                         return ret;
7840                 end = cpu_to_le32(end);
7841         }
7842
7843         buf = data;
7844         if (b_offset || odd_len) {
7845                 buf = kmalloc(len, GFP_KERNEL);
7846                 if (!buf)
7847                         return -ENOMEM;
7848                 if (b_offset)
7849                         memcpy(buf, &start, 4);
7850                 if (odd_len)
7851                         memcpy(buf+len-4, &end, 4);
7852                 memcpy(buf + b_offset, data, eeprom->len);
7853         }
7854
7855         ret = tg3_nvram_write_block(tp, offset, len, buf);
7856
7857         if (buf != data)
7858                 kfree(buf);
7859
7860         return ret;
7861 }
7862
7863 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7864 {
7865         struct tg3 *tp = netdev_priv(dev);
7866
7867         cmd->supported = (SUPPORTED_Autoneg);
7868
7869         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7870                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7871                                    SUPPORTED_1000baseT_Full);
7872
7873         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7874                 cmd->supported |= (SUPPORTED_100baseT_Half |
7875                                   SUPPORTED_100baseT_Full |
7876                                   SUPPORTED_10baseT_Half |
7877                                   SUPPORTED_10baseT_Full |
7878                                   SUPPORTED_MII);
7879                 cmd->port = PORT_TP;
7880         } else {
7881                 cmd->supported |= SUPPORTED_FIBRE;
7882                 cmd->port = PORT_FIBRE;
7883         }
7884
7885         cmd->advertising = tp->link_config.advertising;
7886         if (netif_running(dev)) {
7887                 cmd->speed = tp->link_config.active_speed;
7888                 cmd->duplex = tp->link_config.active_duplex;
7889         }
7890         cmd->phy_address = PHY_ADDR;
7891         cmd->transceiver = 0;
7892         cmd->autoneg = tp->link_config.autoneg;
7893         cmd->maxtxpkt = 0;
7894         cmd->maxrxpkt = 0;
7895         return 0;
7896 }
7897
7898 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7899 {
7900         struct tg3 *tp = netdev_priv(dev);
7901
7902         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7903                 /* These are the only advertisement bits that may be set.  */
7904                 if (cmd->autoneg == AUTONEG_ENABLE &&
7905                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7906                                           ADVERTISED_1000baseT_Full |
7907                                           ADVERTISED_Autoneg |
7908                                           ADVERTISED_FIBRE)))
7909                         return -EINVAL;
7910                 /* Fiber can only do SPEED_1000.  */
7911                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7912                          (cmd->speed != SPEED_1000))
7913                         return -EINVAL;
7914         /* Copper cannot force SPEED_1000.  */
7915         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7916                    (cmd->speed == SPEED_1000))
7917                 return -EINVAL;
7918         else if ((cmd->speed == SPEED_1000) &&
7919                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7920                 return -EINVAL;
7921
7922         tg3_full_lock(tp, 0);
7923
7924         tp->link_config.autoneg = cmd->autoneg;
7925         if (cmd->autoneg == AUTONEG_ENABLE) {
7926                 tp->link_config.advertising = cmd->advertising;
7927                 tp->link_config.speed = SPEED_INVALID;
7928                 tp->link_config.duplex = DUPLEX_INVALID;
7929         } else {
7930                 tp->link_config.advertising = 0;
7931                 tp->link_config.speed = cmd->speed;
7932                 tp->link_config.duplex = cmd->duplex;
7933         }
7934
7935         if (netif_running(dev))
7936                 tg3_setup_phy(tp, 1);
7937
7938         tg3_full_unlock(tp);
7939
7940         return 0;
7941 }
7942
7943 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7944 {
7945         struct tg3 *tp = netdev_priv(dev);
7946
7947         strcpy(info->driver, DRV_MODULE_NAME);
7948         strcpy(info->version, DRV_MODULE_VERSION);
7949         strcpy(info->fw_version, tp->fw_ver);
7950         strcpy(info->bus_info, pci_name(tp->pdev));
7951 }
7952
7953 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7954 {
7955         struct tg3 *tp = netdev_priv(dev);
7956
7957         wol->supported = WAKE_MAGIC;
7958         wol->wolopts = 0;
7959         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7960                 wol->wolopts = WAKE_MAGIC;
7961         memset(&wol->sopass, 0, sizeof(wol->sopass));
7962 }
7963
7964 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7965 {
7966         struct tg3 *tp = netdev_priv(dev);
7967
7968         if (wol->wolopts & ~WAKE_MAGIC)
7969                 return -EINVAL;
7970         if ((wol->wolopts & WAKE_MAGIC) &&
7971             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
7972             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7973                 return -EINVAL;
7974
7975         spin_lock_bh(&tp->lock);
7976         if (wol->wolopts & WAKE_MAGIC)
7977                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7978         else
7979                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7980         spin_unlock_bh(&tp->lock);
7981
7982         return 0;
7983 }
7984
7985 static u32 tg3_get_msglevel(struct net_device *dev)
7986 {
7987         struct tg3 *tp = netdev_priv(dev);
7988         return tp->msg_enable;
7989 }
7990
7991 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7992 {
7993         struct tg3 *tp = netdev_priv(dev);
7994         tp->msg_enable = value;
7995 }
7996
7997 #if TG3_TSO_SUPPORT != 0
7998 static int tg3_set_tso(struct net_device *dev, u32 value)
7999 {
8000         struct tg3 *tp = netdev_priv(dev);
8001
8002         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8003                 if (value)
8004                         return -EINVAL;
8005                 return 0;
8006         }
8007         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8008             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8009                 if (value)
8010                         dev->features |= NETIF_F_TSO6;
8011                 else
8012                         dev->features &= ~NETIF_F_TSO6;
8013         }
8014         return ethtool_op_set_tso(dev, value);
8015 }
8016 #endif
8017
8018 static int tg3_nway_reset(struct net_device *dev)
8019 {
8020         struct tg3 *tp = netdev_priv(dev);
8021         u32 bmcr;
8022         int r;
8023
8024         if (!netif_running(dev))
8025                 return -EAGAIN;
8026
8027         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8028                 return -EINVAL;
8029
8030         spin_lock_bh(&tp->lock);
8031         r = -EINVAL;
8032         tg3_readphy(tp, MII_BMCR, &bmcr);
8033         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8034             ((bmcr & BMCR_ANENABLE) ||
8035              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8036                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8037                                            BMCR_ANENABLE);
8038                 r = 0;
8039         }
8040         spin_unlock_bh(&tp->lock);
8041
8042         return r;
8043 }
8044
8045 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8046 {
8047         struct tg3 *tp = netdev_priv(dev);
8048
8049         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8050         ering->rx_mini_max_pending = 0;
8051         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8052                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8053         else
8054                 ering->rx_jumbo_max_pending = 0;
8055
8056         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8057
8058         ering->rx_pending = tp->rx_pending;
8059         ering->rx_mini_pending = 0;
8060         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8061                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8062         else
8063                 ering->rx_jumbo_pending = 0;
8064
8065         ering->tx_pending = tp->tx_pending;
8066 }
8067
8068 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8069 {
8070         struct tg3 *tp = netdev_priv(dev);
8071         int irq_sync = 0, err = 0;
8072
8073         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8074             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8075             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
8076                 return -EINVAL;
8077
8078         if (netif_running(dev)) {
8079                 tg3_netif_stop(tp);
8080                 irq_sync = 1;
8081         }
8082
8083         tg3_full_lock(tp, irq_sync);
8084
8085         tp->rx_pending = ering->rx_pending;
8086
8087         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8088             tp->rx_pending > 63)
8089                 tp->rx_pending = 63;
8090         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8091         tp->tx_pending = ering->tx_pending;
8092
8093         if (netif_running(dev)) {
8094                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8095                 err = tg3_restart_hw(tp, 1);
8096                 if (!err)
8097                         tg3_netif_start(tp);
8098         }
8099
8100         tg3_full_unlock(tp);
8101
8102         return err;
8103 }
8104
8105 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8106 {
8107         struct tg3 *tp = netdev_priv(dev);
8108
8109         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8110         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8111         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8112 }
8113
8114 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8115 {
8116         struct tg3 *tp = netdev_priv(dev);
8117         int irq_sync = 0, err = 0;
8118
8119         if (netif_running(dev)) {
8120                 tg3_netif_stop(tp);
8121                 irq_sync = 1;
8122         }
8123
8124         tg3_full_lock(tp, irq_sync);
8125
8126         if (epause->autoneg)
8127                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8128         else
8129                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8130         if (epause->rx_pause)
8131                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8132         else
8133                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8134         if (epause->tx_pause)
8135                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8136         else
8137                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8138
8139         if (netif_running(dev)) {
8140                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8141                 err = tg3_restart_hw(tp, 1);
8142                 if (!err)
8143                         tg3_netif_start(tp);
8144         }
8145
8146         tg3_full_unlock(tp);
8147
8148         return err;
8149 }
8150
8151 static u32 tg3_get_rx_csum(struct net_device *dev)
8152 {
8153         struct tg3 *tp = netdev_priv(dev);
8154         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8155 }
8156
8157 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8158 {
8159         struct tg3 *tp = netdev_priv(dev);
8160
8161         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8162                 if (data != 0)
8163                         return -EINVAL;
8164                 return 0;
8165         }
8166
8167         spin_lock_bh(&tp->lock);
8168         if (data)
8169                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8170         else
8171                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8172         spin_unlock_bh(&tp->lock);
8173
8174         return 0;
8175 }
8176
8177 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8178 {
8179         struct tg3 *tp = netdev_priv(dev);
8180
8181         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8182                 if (data != 0)
8183                         return -EINVAL;
8184                 return 0;
8185         }
8186
8187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8188             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8189                 ethtool_op_set_tx_hw_csum(dev, data);
8190         else
8191                 ethtool_op_set_tx_csum(dev, data);
8192
8193         return 0;
8194 }
8195
8196 static int tg3_get_stats_count(struct net_device *dev)
8197 {
8198         return TG3_NUM_STATS;
8199 }
8200
8201 static int tg3_get_test_count(struct net_device *dev)
8202 {
8203         return TG3_NUM_TEST;
8204 }
8205
8206 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8207 {
8208         switch (stringset) {
8209         case ETH_SS_STATS:
8210                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8211                 break;
8212         case ETH_SS_TEST:
8213                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8214                 break;
8215         default:
8216                 WARN_ON(1);     /* unhandled string set; a WARN() with a message would be better */
8217                 break;
8218         }
8219 }
8220
8221 static int tg3_phys_id(struct net_device *dev, u32 data)
8222 {
8223         struct tg3 *tp = netdev_priv(dev);
8224         int i;
8225
8226         if (!netif_running(tp->dev))
8227                 return -EAGAIN;
8228
8229         if (data == 0)
8230                 data = 2;
8231
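             /* Blink for roughly 'data' seconds: each iteration forces the
              * LEDs on or off and then sleeps 500 ms, so two iterations make
              * up one blink cycle.
              */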
8232         for (i = 0; i < (data * 2); i++) {
8233                 if ((i % 2) == 0)
8234                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8235                                            LED_CTRL_1000MBPS_ON |
8236                                            LED_CTRL_100MBPS_ON |
8237                                            LED_CTRL_10MBPS_ON |
8238                                            LED_CTRL_TRAFFIC_OVERRIDE |
8239                                            LED_CTRL_TRAFFIC_BLINK |
8240                                            LED_CTRL_TRAFFIC_LED);
8241
8242                 else
8243                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8244                                            LED_CTRL_TRAFFIC_OVERRIDE);
8245
8246                 if (msleep_interruptible(500))
8247                         break;
8248         }
8249         tw32(MAC_LED_CTRL, tp->led_ctrl);
8250         return 0;
8251 }
8252
8253 static void tg3_get_ethtool_stats(struct net_device *dev,
8254                                   struct ethtool_stats *estats, u64 *tmp_stats)
8255 {
8256         struct tg3 *tp = netdev_priv(dev);
8257         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8258 }
8259
8260 #define NVRAM_TEST_SIZE 0x100
8261 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8262
8263 static int tg3_test_nvram(struct tg3 *tp)
8264 {
8265         u32 *buf, csum, magic;
8266         int i, j, err = 0, size;
8267
8268         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8269                 return -EIO;
8270
8271         if (magic == TG3_EEPROM_MAGIC)
8272                 size = NVRAM_TEST_SIZE;
8273         else if ((magic & 0xff000000) == 0xa5000000) {
8274                 if ((magic & 0xe00000) == 0x200000)
8275                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8276                 else
8277                         return 0;
8278         } else
8279                 return -EIO;
8280
8281         buf = kmalloc(size, GFP_KERNEL);
8282         if (buf == NULL)
8283                 return -ENOMEM;
8284
8285         err = -EIO;
8286         for (i = 0, j = 0; i < size; i += 4, j++) {
8287                 u32 val;
8288
8289                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8290                         break;
8291                 buf[j] = cpu_to_le32(val);
8292         }
8293         if (i < size)
8294                 goto out;
8295
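             /* Two validation schemes: selfboot images (no EEPROM magic in
              * word 0) must have a zero 8-bit byte sum, while legacy images
              * carry CRC-32 checksums over the bootstrap and manufacturing
              * blocks checked below.
              */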
8296         /* Selfboot format */
8297         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8298                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8299
8300                 for (i = 0; i < size; i++)
8301                         csum8 += buf8[i];
8302
8303                 if (csum8 == 0) {
8304                         err = 0;
8305                         goto out;
8306                 }
8307
8308                 err = -EIO;
8309                 goto out;
8310         }
8311
8312         /* Bootstrap checksum at offset 0x10 */
8313         csum = calc_crc((unsigned char *) buf, 0x10);
8314         if (csum != cpu_to_le32(buf[0x10/4]))
8315                 goto out;
8316
8317         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8318         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8319         if (csum != cpu_to_le32(buf[0xfc/4]))
8320                 goto out;
8321
8322         err = 0;
8323
8324 out:
8325         kfree(buf);
8326         return err;
8327 }
8328
8329 #define TG3_SERDES_TIMEOUT_SEC  2
8330 #define TG3_COPPER_TIMEOUT_SEC  6
8331
8332 static int tg3_test_link(struct tg3 *tp)
8333 {
8334         int i, max;
8335
8336         if (!netif_running(tp->dev))
8337                 return -ENODEV;
8338
8339         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8340                 max = TG3_SERDES_TIMEOUT_SEC;
8341         else
8342                 max = TG3_COPPER_TIMEOUT_SEC;
8343
8344         for (i = 0; i < max; i++) {
8345                 if (netif_carrier_ok(tp->dev))
8346                         return 0;
8347
8348                 if (msleep_interruptible(1000))
8349                         break;
8350         }
8351
8352         return -EIO;
8353 }
8354
8355 /* Only test the commonly used registers */
8356 static int tg3_test_registers(struct tg3 *tp)
8357 {
8358         int i, is_5705;
8359         u32 offset, read_mask, write_mask, val, save_val, read_val;
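             /* Each reg_tbl entry lists a register offset, chip-applicability
              * flags, a mask of read-only bits whose value must survive
              * writes, and a mask of read/write bits that must accept both
              * all-zeros and all-ones.
              */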
8360         static struct {
8361                 u16 offset;
8362                 u16 flags;
8363 #define TG3_FL_5705     0x1
8364 #define TG3_FL_NOT_5705 0x2
8365 #define TG3_FL_NOT_5788 0x4
8366                 u32 read_mask;
8367                 u32 write_mask;
8368         } reg_tbl[] = {
8369                 /* MAC Control Registers */
8370                 { MAC_MODE, TG3_FL_NOT_5705,
8371                         0x00000000, 0x00ef6f8c },
8372                 { MAC_MODE, TG3_FL_5705,
8373                         0x00000000, 0x01ef6b8c },
8374                 { MAC_STATUS, TG3_FL_NOT_5705,
8375                         0x03800107, 0x00000000 },
8376                 { MAC_STATUS, TG3_FL_5705,
8377                         0x03800100, 0x00000000 },
8378                 { MAC_ADDR_0_HIGH, 0x0000,
8379                         0x00000000, 0x0000ffff },
8380                 { MAC_ADDR_0_LOW, 0x0000,
8381                         0x00000000, 0xffffffff },
8382                 { MAC_RX_MTU_SIZE, 0x0000,
8383                         0x00000000, 0x0000ffff },
8384                 { MAC_TX_MODE, 0x0000,
8385                         0x00000000, 0x00000070 },
8386                 { MAC_TX_LENGTHS, 0x0000,
8387                         0x00000000, 0x00003fff },
8388                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8389                         0x00000000, 0x000007fc },
8390                 { MAC_RX_MODE, TG3_FL_5705,
8391                         0x00000000, 0x000007dc },
8392                 { MAC_HASH_REG_0, 0x0000,
8393                         0x00000000, 0xffffffff },
8394                 { MAC_HASH_REG_1, 0x0000,
8395                         0x00000000, 0xffffffff },
8396                 { MAC_HASH_REG_2, 0x0000,
8397                         0x00000000, 0xffffffff },
8398                 { MAC_HASH_REG_3, 0x0000,
8399                         0x00000000, 0xffffffff },
8400
8401                 /* Receive Data and Receive BD Initiator Control Registers. */
8402                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8403                         0x00000000, 0xffffffff },
8404                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8405                         0x00000000, 0xffffffff },
8406                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8407                         0x00000000, 0x00000003 },
8408                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8409                         0x00000000, 0xffffffff },
8410                 { RCVDBDI_STD_BD+0, 0x0000,
8411                         0x00000000, 0xffffffff },
8412                 { RCVDBDI_STD_BD+4, 0x0000,
8413                         0x00000000, 0xffffffff },
8414                 { RCVDBDI_STD_BD+8, 0x0000,
8415                         0x00000000, 0xffff0002 },
8416                 { RCVDBDI_STD_BD+0xc, 0x0000,
8417                         0x00000000, 0xffffffff },
8418
8419                 /* Receive BD Initiator Control Registers. */
8420                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8421                         0x00000000, 0xffffffff },
8422                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8423                         0x00000000, 0x000003ff },
8424                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8425                         0x00000000, 0xffffffff },
8426
8427                 /* Host Coalescing Control Registers. */
8428                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8429                         0x00000000, 0x00000004 },
8430                 { HOSTCC_MODE, TG3_FL_5705,
8431                         0x00000000, 0x000000f6 },
8432                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8433                         0x00000000, 0xffffffff },
8434                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8435                         0x00000000, 0x000003ff },
8436                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8437                         0x00000000, 0xffffffff },
8438                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8439                         0x00000000, 0x000003ff },
8440                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8441                         0x00000000, 0xffffffff },
8442                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8443                         0x00000000, 0x000000ff },
8444                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8445                         0x00000000, 0xffffffff },
8446                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8447                         0x00000000, 0x000000ff },
8448                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8449                         0x00000000, 0xffffffff },
8450                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8451                         0x00000000, 0xffffffff },
8452                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8453                         0x00000000, 0xffffffff },
8454                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8455                         0x00000000, 0x000000ff },
8456                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8457                         0x00000000, 0xffffffff },
8458                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8459                         0x00000000, 0x000000ff },
8460                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8461                         0x00000000, 0xffffffff },
8462                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8463                         0x00000000, 0xffffffff },
8464                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8465                         0x00000000, 0xffffffff },
8466                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8467                         0x00000000, 0xffffffff },
8468                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8469                         0x00000000, 0xffffffff },
8470                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8471                         0xffffffff, 0x00000000 },
8472                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8473                         0xffffffff, 0x00000000 },
8474
8475                 /* Buffer Manager Control Registers. */
8476                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8477                         0x00000000, 0x007fff80 },
8478                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8479                         0x00000000, 0x007fffff },
8480                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8481                         0x00000000, 0x0000003f },
8482                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8483                         0x00000000, 0x000001ff },
8484                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8485                         0x00000000, 0x000001ff },
8486                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8487                         0xffffffff, 0x00000000 },
8488                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8489                         0xffffffff, 0x00000000 },
8490
8491                 /* Mailbox Registers */
8492                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8493                         0x00000000, 0x000001ff },
8494                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8495                         0x00000000, 0x000001ff },
8496                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8497                         0x00000000, 0x000007ff },
8498                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8499                         0x00000000, 0x000001ff },
8500
8501                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8502         };
8503
8504         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8505                 is_5705 = 1;
8506         else
8507                 is_5705 = 0;
8508
8509         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8510                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8511                         continue;
8512
8513                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8514                         continue;
8515
8516                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8517                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8518                         continue;
8519
8520                 offset = (u32) reg_tbl[i].offset;
8521                 read_mask = reg_tbl[i].read_mask;
8522                 write_mask = reg_tbl[i].write_mask;
8523
8524                 /* Save the original register content */
8525                 save_val = tr32(offset);
8526
8527                 /* Determine the read-only value. */
8528                 read_val = save_val & read_mask;
8529
8530                 /* Write zero to the register, then make sure the read-only bits
8531                  * are not changed and the read/write bits are all zeros.
8532                  */
8533                 tw32(offset, 0);
8534
8535                 val = tr32(offset);
8536
8537                 /* Test the read-only and read/write bits. */
8538                 if (((val & read_mask) != read_val) || (val & write_mask))
8539                         goto out;
8540
8541                 /* Write ones to all the bits defined by RdMask and WrMask, then
8542                  * make sure the read-only bits are not changed and the
8543                  * read/write bits are all ones.
8544                  */
8545                 tw32(offset, read_mask | write_mask);
8546
8547                 val = tr32(offset);
8548
8549                 /* Test the read-only bits. */
8550                 if ((val & read_mask) != read_val)
8551                         goto out;
8552
8553                 /* Test the read/write bits. */
8554                 if ((val & write_mask) != write_mask)
8555                         goto out;
8556
8557                 tw32(offset, save_val);
8558         }
8559
8560         return 0;
8561
8562 out:
8563         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8564         tw32(offset, save_val);
8565         return -EIO;
8566 }
8567
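     /* Walk a block of NIC-internal memory, writing each test pattern and
      * reading it back through the tg3_write_mem()/tg3_read_mem() helpers.
      */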
8568 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8569 {
8570         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8571         int i;
8572         u32 j;
8573
8574         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8575                 for (j = 0; j < len; j += 4) {
8576                         u32 val;
8577
8578                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8579                         tg3_read_mem(tp, offset + j, &val);
8580                         if (val != test_pattern[i])
8581                                 return -EIO;
8582                 }
8583         }
8584         return 0;
8585 }
8586
8587 static int tg3_test_memory(struct tg3 *tp)
8588 {
8589         static struct mem_entry {
8590                 u32 offset;
8591                 u32 len;
8592         } mem_tbl_570x[] = {
8593                 { 0x00000000, 0x00b50},
8594                 { 0x00002000, 0x1c000},
8595                 { 0xffffffff, 0x00000}
8596         }, mem_tbl_5705[] = {
8597                 { 0x00000100, 0x0000c},
8598                 { 0x00000200, 0x00008},
8599                 { 0x00004000, 0x00800},
8600                 { 0x00006000, 0x01000},
8601                 { 0x00008000, 0x02000},
8602                 { 0x00010000, 0x0e000},
8603                 { 0xffffffff, 0x00000}
8604         }, mem_tbl_5755[] = {
8605                 { 0x00000200, 0x00008},
8606                 { 0x00004000, 0x00800},
8607                 { 0x00006000, 0x00800},
8608                 { 0x00008000, 0x02000},
8609                 { 0x00010000, 0x0c000},
8610                 { 0xffffffff, 0x00000}
8611         };
8612         struct mem_entry *mem_tbl;
8613         int err = 0;
8614         int i;
8615
8616         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8617                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8618                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8619                         mem_tbl = mem_tbl_5755;
8620                 else
8621                         mem_tbl = mem_tbl_5705;
8622         } else
8623                 mem_tbl = mem_tbl_570x;
8624
8625         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8626                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8627                     mem_tbl[i].len)) != 0)
8628                         break;
8629         }
8630
8631         return err;
8632 }
8633
8634 #define TG3_MAC_LOOPBACK        0
8635 #define TG3_PHY_LOOPBACK        1
8636
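     /* MAC loopback wraps frames inside the MAC itself; PHY loopback sets
      * BMCR_LOOPBACK in the PHY instead, so frames also traverse the
      * MAC-PHY interface.
      */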
8637 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8638 {
8639         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8640         u32 desc_idx;
8641         struct sk_buff *skb, *rx_skb;
8642         u8 *tx_data;
8643         dma_addr_t map;
8644         int num_pkts, tx_len, rx_len, i, err;
8645         struct tg3_rx_buffer_desc *desc;
8646
8647         if (loopback_mode == TG3_MAC_LOOPBACK) {
8648                 /* HW erratum: MAC loopback fails in some cases on 5780.
8649                  * Normal traffic and PHY loopback are not affected by
8650                  * this erratum.
8651                  */
8652                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8653                         return 0;
8654
8655                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8656                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
8657                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8658                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8659                 else
8660                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8661                 tw32(MAC_MODE, mac_mode);
8662         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8663                 u32 val;
8664
8665                 val = BMCR_LOOPBACK | BMCR_FULLDPLX;
8666                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8667                         val |= BMCR_SPEED100;
8668                 else
8669                         val |= BMCR_SPEED1000;
8670
8671                 tg3_writephy(tp, MII_BMCR, val);
8672                 udelay(40);
8673                 /* reset to prevent losing 1st rx packet intermittently */
8674                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8675                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8676                         udelay(10);
8677                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8678                 }
8679                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8680                            MAC_MODE_LINK_POLARITY;
8681                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8682                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8683                 else
8684                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8685                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8686                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8687                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8688                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8689                 }
8690                 tw32(MAC_MODE, mac_mode);
8691         } else {
8692                 return -EINVAL;
8693         }
8694
8695         err = -EIO;
8696
8697         tx_len = 1514;
8698         skb = netdev_alloc_skb(tp->dev, tx_len);
8699         if (!skb)
8700                 return -ENOMEM;
8701
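             /* Build a minimal test frame: our own MAC address as the
              * destination, a zeroed source/type area, and an incrementing
              * byte pattern from offset 14 onward that the receive path
              * checks byte for byte.
              */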
8702         tx_data = skb_put(skb, tx_len);
8703         memcpy(tx_data, tp->dev->dev_addr, 6);
8704         memset(tx_data + 6, 0x0, 8);
8705
8706         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8707
8708         for (i = 14; i < tx_len; i++)
8709                 tx_data[i] = (u8) (i & 0xff);
8710
8711         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8712
8713         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8714              HOSTCC_MODE_NOW);
8715
8716         udelay(10);
8717
8718         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8719
8720         num_pkts = 0;
8721
8722         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8723
8724         tp->tx_prod++;
8725         num_pkts++;
8726
8727         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8728                      tp->tx_prod);
8729         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8730
8731         udelay(10);
8732
8733         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
8734         for (i = 0; i < 25; i++) {
8735                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8736                        HOSTCC_MODE_NOW);
8737
8738                 udelay(10);
8739
8740                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8741                 rx_idx = tp->hw_status->idx[0].rx_producer;
8742                 if ((tx_idx == tp->tx_prod) &&
8743                     (rx_idx == (rx_start_idx + num_pkts)))
8744                         break;
8745         }
8746
8747         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8748         dev_kfree_skb(skb);
8749
8750         if (tx_idx != tp->tx_prod)
8751                 goto out;
8752
8753         if (rx_idx != rx_start_idx + num_pkts)
8754                 goto out;
8755
8756         desc = &tp->rx_rcb[rx_start_idx];
8757         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8758         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8759         if (opaque_key != RXD_OPAQUE_RING_STD)
8760                 goto out;
8761
8762         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8763             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8764                 goto out;
8765
8766         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8767         if (rx_len != tx_len)
8768                 goto out;
8769
8770         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8771
8772         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8773         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8774
8775         for (i = 14; i < tx_len; i++) {
8776                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8777                         goto out;
8778         }
8779         err = 0;
8780
8781         /* tg3_free_rings will unmap and free the rx_skb */
8782 out:
8783         return err;
8784 }
8785
8786 #define TG3_MAC_LOOPBACK_FAILED         1
8787 #define TG3_PHY_LOOPBACK_FAILED         2
8788 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8789                                          TG3_PHY_LOOPBACK_FAILED)
8790
8791 static int tg3_test_loopback(struct tg3 *tp)
8792 {
8793         int err = 0;
8794
8795         if (!netif_running(tp->dev))
8796                 return TG3_LOOPBACK_FAILED;
8797
8798         err = tg3_reset_hw(tp, 1);
8799         if (err)
8800                 return TG3_LOOPBACK_FAILED;
8801
8802         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8803                 err |= TG3_MAC_LOOPBACK_FAILED;
8804         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8805                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8806                         err |= TG3_PHY_LOOPBACK_FAILED;
8807         }
8808
8809         return err;
8810 }
8811
8812 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8813                           u64 *data)
8814 {
8815         struct tg3 *tp = netdev_priv(dev);
8816
8817         if (tp->link_config.phy_is_low_power)
8818                 tg3_set_power_state(tp, PCI_D0);
8819
8820         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8821
8822         if (tg3_test_nvram(tp) != 0) {
8823                 etest->flags |= ETH_TEST_FL_FAILED;
8824                 data[0] = 1;
8825         }
8826         if (tg3_test_link(tp) != 0) {
8827                 etest->flags |= ETH_TEST_FL_FAILED;
8828                 data[1] = 1;
8829         }
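             /* Offline tests tear the chip down: halt it (and its on-chip
              * CPUs), run the register, memory, loopback and interrupt
              * tests, then reset and restart the interface if it was up.
              */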
8830         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8831                 int err, irq_sync = 0;
8832
8833                 if (netif_running(dev)) {
8834                         tg3_netif_stop(tp);
8835                         irq_sync = 1;
8836                 }
8837
8838                 tg3_full_lock(tp, irq_sync);
8839
8840                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8841                 err = tg3_nvram_lock(tp);
8842                 tg3_halt_cpu(tp, RX_CPU_BASE);
8843                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8844                         tg3_halt_cpu(tp, TX_CPU_BASE);
8845                 if (!err)
8846                         tg3_nvram_unlock(tp);
8847
8848                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8849                         tg3_phy_reset(tp);
8850
8851                 if (tg3_test_registers(tp) != 0) {
8852                         etest->flags |= ETH_TEST_FL_FAILED;
8853                         data[2] = 1;
8854                 }
8855                 if (tg3_test_memory(tp) != 0) {
8856                         etest->flags |= ETH_TEST_FL_FAILED;
8857                         data[3] = 1;
8858                 }
8859                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8860                         etest->flags |= ETH_TEST_FL_FAILED;
8861
8862                 tg3_full_unlock(tp);
8863
8864                 if (tg3_test_interrupt(tp) != 0) {
8865                         etest->flags |= ETH_TEST_FL_FAILED;
8866                         data[5] = 1;
8867                 }
8868
8869                 tg3_full_lock(tp, 0);
8870
8871                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8872                 if (netif_running(dev)) {
8873                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8874                         if (!tg3_restart_hw(tp, 1))
8875                                 tg3_netif_start(tp);
8876                 }
8877
8878                 tg3_full_unlock(tp);
8879         }
8880         if (tp->link_config.phy_is_low_power)
8881                 tg3_set_power_state(tp, PCI_D3hot);
8882
8883 }
8884
8885 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8886 {
8887         struct mii_ioctl_data *data = if_mii(ifr);
8888         struct tg3 *tp = netdev_priv(dev);
8889         int err;
8890
8891         switch (cmd) {
8892         case SIOCGMIIPHY:
8893                 data->phy_id = PHY_ADDR;
8894
8895                 /* fallthru */
8896         case SIOCGMIIREG: {
8897                 u32 mii_regval;
8898
8899                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8900                         break;                  /* We have no PHY */
8901
8902                 if (tp->link_config.phy_is_low_power)
8903                         return -EAGAIN;
8904
8905                 spin_lock_bh(&tp->lock);
8906                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8907                 spin_unlock_bh(&tp->lock);
8908
8909                 data->val_out = mii_regval;
8910
8911                 return err;
8912         }
8913
8914         case SIOCSMIIREG:
8915                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8916                         break;                  /* We have no PHY */
8917
8918                 if (!capable(CAP_NET_ADMIN))
8919                         return -EPERM;
8920
8921                 if (tp->link_config.phy_is_low_power)
8922                         return -EAGAIN;
8923
8924                 spin_lock_bh(&tp->lock);
8925                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8926                 spin_unlock_bh(&tp->lock);
8927
8928                 return err;
8929
8930         default:
8931                 /* do nothing */
8932                 break;
8933         }
8934         return -EOPNOTSUPP;
8935 }
8936
8937 #if TG3_VLAN_TAG_USED
8938 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8939 {
8940         struct tg3 *tp = netdev_priv(dev);
8941
8942         if (netif_running(dev))
8943                 tg3_netif_stop(tp);
8944
8945         tg3_full_lock(tp, 0);
8946
8947         tp->vlgrp = grp;
8948
8949         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8950         __tg3_set_rx_mode(dev);
8951
8952         tg3_full_unlock(tp);
8953
8954         if (netif_running(dev))
8955                 tg3_netif_start(tp);
8956 }
8957
8958 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8959 {
8960         struct tg3 *tp = netdev_priv(dev);
8961
8962         if (netif_running(dev))
8963                 tg3_netif_stop(tp);
8964
8965         tg3_full_lock(tp, 0);
8966         if (tp->vlgrp)
8967                 tp->vlgrp->vlan_devices[vid] = NULL;
8968         tg3_full_unlock(tp);
8969
8970         if (netif_running(dev))
8971                 tg3_netif_start(tp);
8972 }
8973 #endif
8974
8975 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8976 {
8977         struct tg3 *tp = netdev_priv(dev);
8978
8979         memcpy(ec, &tp->coal, sizeof(*ec));
8980         return 0;
8981 }
8982
8983 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8984 {
8985         struct tg3 *tp = netdev_priv(dev);
8986         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8987         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8988
8989         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8990                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8991                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8992                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8993                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8994         }
8995
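             /* On 5705 and newer chips the IRQ coalescing and stats-block
              * limits above stay zero, so those parameters are only accepted
              * as zero on such parts.
              */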
8996         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8997             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8998             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8999             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9000             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9001             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9002             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9003             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9004             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9005             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9006                 return -EINVAL;
9007
9008         /* No rx interrupts will be generated if both are zero */
9009         if ((ec->rx_coalesce_usecs == 0) &&
9010             (ec->rx_max_coalesced_frames == 0))
9011                 return -EINVAL;
9012
9013         /* No tx interrupts will be generated if both are zero */
9014         if ((ec->tx_coalesce_usecs == 0) &&
9015             (ec->tx_max_coalesced_frames == 0))
9016                 return -EINVAL;
9017
9018         /* Only copy relevant parameters, ignore all others. */
9019         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9020         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9021         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9022         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9023         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9024         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9025         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9026         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9027         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9028
9029         if (netif_running(dev)) {
9030                 tg3_full_lock(tp, 0);
9031                 __tg3_set_coalesce(tp, &tp->coal);
9032                 tg3_full_unlock(tp);
9033         }
9034         return 0;
9035 }
9036
9037 static const struct ethtool_ops tg3_ethtool_ops = {
9038         .get_settings           = tg3_get_settings,
9039         .set_settings           = tg3_set_settings,
9040         .get_drvinfo            = tg3_get_drvinfo,
9041         .get_regs_len           = tg3_get_regs_len,
9042         .get_regs               = tg3_get_regs,
9043         .get_wol                = tg3_get_wol,
9044         .set_wol                = tg3_set_wol,
9045         .get_msglevel           = tg3_get_msglevel,
9046         .set_msglevel           = tg3_set_msglevel,
9047         .nway_reset             = tg3_nway_reset,
9048         .get_link               = ethtool_op_get_link,
9049         .get_eeprom_len         = tg3_get_eeprom_len,
9050         .get_eeprom             = tg3_get_eeprom,
9051         .set_eeprom             = tg3_set_eeprom,
9052         .get_ringparam          = tg3_get_ringparam,
9053         .set_ringparam          = tg3_set_ringparam,
9054         .get_pauseparam         = tg3_get_pauseparam,
9055         .set_pauseparam         = tg3_set_pauseparam,
9056         .get_rx_csum            = tg3_get_rx_csum,
9057         .set_rx_csum            = tg3_set_rx_csum,
9058         .get_tx_csum            = ethtool_op_get_tx_csum,
9059         .set_tx_csum            = tg3_set_tx_csum,
9060         .get_sg                 = ethtool_op_get_sg,
9061         .set_sg                 = ethtool_op_set_sg,
9062 #if TG3_TSO_SUPPORT != 0
9063         .get_tso                = ethtool_op_get_tso,
9064         .set_tso                = tg3_set_tso,
9065 #endif
9066         .self_test_count        = tg3_get_test_count,
9067         .self_test              = tg3_self_test,
9068         .get_strings            = tg3_get_strings,
9069         .phys_id                = tg3_phys_id,
9070         .get_stats_count        = tg3_get_stats_count,
9071         .get_ethtool_stats      = tg3_get_ethtool_stats,
9072         .get_coalesce           = tg3_get_coalesce,
9073         .set_coalesce           = tg3_set_coalesce,
9074         .get_perm_addr          = ethtool_op_get_perm_addr,
9075 };
9076
9077 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9078 {
9079         u32 cursize, val, magic;
9080
9081         tp->nvram_size = EEPROM_CHIP_SIZE;
9082
9083         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9084                 return;
9085
9086         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9087                 return;
9088
9089         /*
9090          * Size the chip by reading offsets at increasing powers of two.
9091          * When we encounter our validation signature, we know the addressing
9092          * has wrapped around, and thus have our chip size.
9093          */
9094         cursize = 0x10;
9095
9096         while (cursize < tp->nvram_size) {
9097                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9098                         return;
9099
9100                 if (val == magic)
9101                         break;
9102
9103                 cursize <<= 1;
9104         }
9105
9106         tp->nvram_size = cursize;
9107 }
9108
9109 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9110 {
9111         u32 val;
9112
9113         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9114                 return;
9115
9116         /* Selfboot format */
9117         if (val != TG3_EEPROM_MAGIC) {
9118                 tg3_get_eeprom_size(tp);
9119                 return;
9120         }
9121
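             /* Otherwise this is a standard TG3 image: the dword at offset
              * 0xf0 carries the NVRAM size in KiB in its upper 16 bits,
              * with a fall-back default of 128 KiB (0x20000) if that field
              * cannot be read or reads zero.
              */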
9122         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9123                 if (val != 0) {
9124                         tp->nvram_size = (val >> 16) * 1024;
9125                         return;
9126                 }
9127         }
9128         tp->nvram_size = 0x20000;
9129 }
9130
9131 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9132 {
9133         u32 nvcfg1;
9134
9135         nvcfg1 = tr32(NVRAM_CFG1);
9136         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9137                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9138         }
9139         else {
9140                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9141                 tw32(NVRAM_CFG1, nvcfg1);
9142         }
9143
9144         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9145             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9146                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9147                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9148                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9149                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9150                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9151                                 break;
9152                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9153                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9154                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9155                                 break;
9156                         case FLASH_VENDOR_ATMEL_EEPROM:
9157                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9158                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9159                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9160                                 break;
9161                         case FLASH_VENDOR_ST:
9162                                 tp->nvram_jedecnum = JEDEC_ST;
9163                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9164                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9165                                 break;
9166                         case FLASH_VENDOR_SAIFUN:
9167                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9168                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9169                                 break;
9170                         case FLASH_VENDOR_SST_SMALL:
9171                         case FLASH_VENDOR_SST_LARGE:
9172                                 tp->nvram_jedecnum = JEDEC_SST;
9173                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9174                                 break;
9175                 }
9176         }
9177         else {
9178                 tp->nvram_jedecnum = JEDEC_ATMEL;
9179                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9180                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9181         }
9182 }
9183
9184 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9185 {
9186         u32 nvcfg1;
9187
9188         nvcfg1 = tr32(NVRAM_CFG1);
9189
9190         /* NVRAM protection for TPM */
9191         if (nvcfg1 & (1 << 27))
9192                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9193
9194         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9195                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9196                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9197                         tp->nvram_jedecnum = JEDEC_ATMEL;
9198                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9199                         break;
9200                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9201                         tp->nvram_jedecnum = JEDEC_ATMEL;
9202                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9203                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9204                         break;
9205                 case FLASH_5752VENDOR_ST_M45PE10:
9206                 case FLASH_5752VENDOR_ST_M45PE20:
9207                 case FLASH_5752VENDOR_ST_M45PE40:
9208                         tp->nvram_jedecnum = JEDEC_ST;
9209                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9210                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9211                         break;
9212         }
9213
9214         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9215                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9216                         case FLASH_5752PAGE_SIZE_256:
9217                                 tp->nvram_pagesize = 256;
9218                                 break;
9219                         case FLASH_5752PAGE_SIZE_512:
9220                                 tp->nvram_pagesize = 512;
9221                                 break;
9222                         case FLASH_5752PAGE_SIZE_1K:
9223                                 tp->nvram_pagesize = 1024;
9224                                 break;
9225                         case FLASH_5752PAGE_SIZE_2K:
9226                                 tp->nvram_pagesize = 2048;
9227                                 break;
9228                         case FLASH_5752PAGE_SIZE_4K:
9229                                 tp->nvram_pagesize = 4096;
9230                                 break;
9231                         case FLASH_5752PAGE_SIZE_264:
9232                                 tp->nvram_pagesize = 264;
9233                                 break;
9234                 }
9235         }
9236         else {
9237                 /* For eeprom, set pagesize to maximum eeprom size */
9238                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9239
9240                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9241                 tw32(NVRAM_CFG1, nvcfg1);
9242         }
9243 }
9244
9245 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9246 {
9247         u32 nvcfg1;
9248
9249         nvcfg1 = tr32(NVRAM_CFG1);
9250
9251         /* NVRAM protection for TPM */
9252         if (nvcfg1 & (1 << 27))
9253                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9254
9255         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9256                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9257                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9258                         tp->nvram_jedecnum = JEDEC_ATMEL;
9259                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9260                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9261
9262                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9263                         tw32(NVRAM_CFG1, nvcfg1);
9264                         break;
9265                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9266                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9267                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9268                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9269                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9270                         tp->nvram_jedecnum = JEDEC_ATMEL;
9271                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9272                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9273                         tp->nvram_pagesize = 264;
9274                         break;
9275                 case FLASH_5752VENDOR_ST_M45PE10:
9276                 case FLASH_5752VENDOR_ST_M45PE20:
9277                 case FLASH_5752VENDOR_ST_M45PE40:
9278                         tp->nvram_jedecnum = JEDEC_ST;
9279                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9280                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9281                         tp->nvram_pagesize = 256;
9282                         break;
9283         }
9284 }
9285
9286 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9287 {
9288         u32 nvcfg1;
9289
9290         nvcfg1 = tr32(NVRAM_CFG1);
9291
9292         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9293                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9294                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9295                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9296                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9297                         tp->nvram_jedecnum = JEDEC_ATMEL;
9298                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9299                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9300
9301                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9302                         tw32(NVRAM_CFG1, nvcfg1);
9303                         break;
9304                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9305                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9306                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9307                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9308                         tp->nvram_jedecnum = JEDEC_ATMEL;
9309                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9310                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9311                         tp->nvram_pagesize = 264;
9312                         break;
9313                 case FLASH_5752VENDOR_ST_M45PE10:
9314                 case FLASH_5752VENDOR_ST_M45PE20:
9315                 case FLASH_5752VENDOR_ST_M45PE40:
9316                         tp->nvram_jedecnum = JEDEC_ST;
9317                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9318                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9319                         tp->nvram_pagesize = 256;
9320                         break;
9321         }
9322 }
9323
9324 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9325 {
9326         tp->nvram_jedecnum = JEDEC_ATMEL;
9327         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9328         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9329 }
9330
9331 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9332 static void __devinit tg3_nvram_init(struct tg3 *tp)
9333 {
9334         int j;
9335
9336         tw32_f(GRC_EEPROM_ADDR,
9337              (EEPROM_ADDR_FSM_RESET |
9338               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9339                EEPROM_ADDR_CLKPERD_SHIFT)));
9340
9341         /* XXX schedule_timeout() ... */
9342         for (j = 0; j < 100; j++)
9343                 udelay(10);
9344
9345         /* Enable seeprom accesses. */
9346         tw32_f(GRC_LOCAL_CTRL,
9347              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9348         udelay(100);
9349
9350         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9351             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9352                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9353
9354                 if (tg3_nvram_lock(tp)) {
9355                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9356                                "tg3_nvram_init failed.\n", tp->dev->name);
9357                         return;
9358                 }
9359                 tg3_enable_nvram_access(tp);
9360
9361                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9362                         tg3_get_5752_nvram_info(tp);
9363                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9364                         tg3_get_5755_nvram_info(tp);
9365                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9366                         tg3_get_5787_nvram_info(tp);
9367                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9368                         tg3_get_5906_nvram_info(tp);
9369                 else
9370                         tg3_get_nvram_info(tp);
9371
9372                 tg3_get_nvram_size(tp);
9373
9374                 tg3_disable_nvram_access(tp);
9375                 tg3_nvram_unlock(tp);
9376
9377         } else {
9378                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9379
9380                 tg3_get_eeprom_size(tp);
9381         }
9382 }
9383
9384 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9385                                         u32 offset, u32 *val)
9386 {
9387         u32 tmp;
9388         int i;
9389
9390         if (offset > EEPROM_ADDR_ADDR_MASK ||
9391             (offset % 4) != 0)
9392                 return -EINVAL;
9393
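             /* Program the dword offset into GRC_EEPROM_ADDR along with the
              * READ and START bits, poll for the COMPLETE bit (100 usec per
              * poll, roughly a one second worst case), and then pick the
              * result up from GRC_EEPROM_DATA.
              */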
9394         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9395                                         EEPROM_ADDR_DEVID_MASK |
9396                                         EEPROM_ADDR_READ);
9397         tw32(GRC_EEPROM_ADDR,
9398              tmp |
9399              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9400              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9401               EEPROM_ADDR_ADDR_MASK) |
9402              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9403
9404         for (i = 0; i < 10000; i++) {
9405                 tmp = tr32(GRC_EEPROM_ADDR);
9406
9407                 if (tmp & EEPROM_ADDR_COMPLETE)
9408                         break;
9409                 udelay(100);
9410         }
9411         if (!(tmp & EEPROM_ADDR_COMPLETE))
9412                 return -EBUSY;
9413
9414         *val = tr32(GRC_EEPROM_DATA);
9415         return 0;
9416 }
9417
9418 #define NVRAM_CMD_TIMEOUT 10000
9419
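     /* Issue a command to the NVRAM interface and poll for NVRAM_CMD_DONE,
      * backing off 10 usec between polls (about a 100 msec worst-case wait).
      */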
9420 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9421 {
9422         int i;
9423
9424         tw32(NVRAM_CMD, nvram_cmd);
9425         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9426                 udelay(10);
9427                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9428                         udelay(10);
9429                         break;
9430                 }
9431         }
9432         if (i == NVRAM_CMD_TIMEOUT) {
9433                 return -EBUSY;
9434         }
9435         return 0;
9436 }
9437
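     /* Translate a linear NVRAM offset into the page-oriented address used by
      * buffered Atmel flash (AT45DB0x1B family): the page index is placed
      * above ATMEL_AT45DB0X1B_PAGE_POS and the byte offset within the page
      * stays in the low bits.  E.g. with the usual 264-byte pages, offset 300
      * becomes page 1, byte 36.
      */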
9438 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9439 {
9440         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9441             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9442             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9443             (tp->nvram_jedecnum == JEDEC_ATMEL))
9444
9445                 addr = ((addr / tp->nvram_pagesize) <<
9446                         ATMEL_AT45DB0X1B_PAGE_POS) +
9447                        (addr % tp->nvram_pagesize);
9448
9449         return addr;
9450 }
9451
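     /* Inverse of tg3_nvram_phys_addr(): fold a page-oriented flash address
      * back into the linear offset used by the rest of the driver.
      */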
9452 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9453 {
9454         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9455             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9456             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9457             (tp->nvram_jedecnum == JEDEC_ATMEL))
9458
9459                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9460                         tp->nvram_pagesize) +
9461                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9462
9463         return addr;
9464 }
9465
9466 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9467 {
9468         int ret;
9469
9470         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9471                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9472
9473         offset = tg3_nvram_phys_addr(tp, offset);
9474
9475         if (offset > NVRAM_ADDR_MSK)
9476                 return -EINVAL;
9477
9478         ret = tg3_nvram_lock(tp);
9479         if (ret)
9480                 return ret;
9481
9482         tg3_enable_nvram_access(tp);
9483
9484         tw32(NVRAM_ADDR, offset);
9485         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9486                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9487
9488         if (ret == 0)
9489                 *val = swab32(tr32(NVRAM_RDDATA));
9490
9491         tg3_disable_nvram_access(tp);
9492
9493         tg3_nvram_unlock(tp);
9494
9495         return ret;
9496 }
9497
9498 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9499 {
9500         int err;
9501         u32 tmp;
9502
9503         err = tg3_nvram_read(tp, offset, &tmp);
9504         *val = swab32(tmp);
9505         return err;
9506 }
9507
9508 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9509                                     u32 offset, u32 len, u8 *buf)
9510 {
9511         int i, j, rc = 0;
9512         u32 val;
9513
9514         for (i = 0; i < len; i += 4) {
9515                 u32 addr, data;
9516
9517                 addr = offset + i;
9518
9519                 memcpy(&data, buf + i, 4);
9520
9521                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9522
9523                 val = tr32(GRC_EEPROM_ADDR);
9524                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9525
9526                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9527                         EEPROM_ADDR_READ);
9528                 tw32(GRC_EEPROM_ADDR, val |
9529                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9530                         (addr & EEPROM_ADDR_ADDR_MASK) |
9531                         EEPROM_ADDR_START |
9532                         EEPROM_ADDR_WRITE);
9533
9534                 for (j = 0; j < 10000; j++) {
9535                         val = tr32(GRC_EEPROM_ADDR);
9536
9537                         if (val & EEPROM_ADDR_COMPLETE)
9538                                 break;
9539                         udelay(100);
9540                 }
9541                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9542                         rc = -EBUSY;
9543                         break;
9544                 }
9545         }
9546
9547         return rc;
9548 }
9549
9550 /* offset and length are dword aligned */
9551 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9552                 u8 *buf)
9553 {
9554         int ret = 0;
9555         u32 pagesize = tp->nvram_pagesize;
9556         u32 pagemask = pagesize - 1;
9557         u32 nvram_cmd;
9558         u8 *tmp;
9559
9560         tmp = kmalloc(pagesize, GFP_KERNEL);
9561         if (tmp == NULL)
9562                 return -ENOMEM;
9563
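             /* Unbuffered flash cannot be updated in place: for every page
              * the request touches, read the whole page into tmp, merge in
              * the caller's data, erase the page, and then rewrite it one
              * dword at a time with FIRST/LAST framing.
              */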
9564         while (len) {
9565                 int j;
9566                 u32 phy_addr, page_off, size;
9567
9568                 phy_addr = offset & ~pagemask;
9569
9570                 for (j = 0; j < pagesize; j += 4) {
9571                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9572                                                 (u32 *) (tmp + j))))
9573                                 break;
9574                 }
9575                 if (ret)
9576                         break;
9577
9578                 page_off = offset & pagemask;
9579                 size = pagesize;
9580                 if (len < size)
9581                         size = len;
9582
9583                 len -= size;
9584
9585                 memcpy(tmp + page_off, buf, size);
9586
9587                 offset = offset + (pagesize - page_off);
9588
9589                 tg3_enable_nvram_access(tp);
9590
9591                 /*
9592                  * Before we can erase the flash page, we need
9593                  * to issue a special "write enable" command.
9594                  */
9595                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9596
9597                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9598                         break;
9599
9600                 /* Erase the target page */
9601                 tw32(NVRAM_ADDR, phy_addr);
9602
9603                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9604                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9605
9606                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9607                         break;
9608
9609                 /* Issue another write enable to start the write. */
9610                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9611
9612                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9613                         break;
9614
9615                 for (j = 0; j < pagesize; j += 4) {
9616                         u32 data;
9617
9618                         data = *((u32 *) (tmp + j));
9619                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9620
9621                         tw32(NVRAM_ADDR, phy_addr + j);
9622
9623                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9624                                 NVRAM_CMD_WR;
9625
9626                         if (j == 0)
9627                                 nvram_cmd |= NVRAM_CMD_FIRST;
9628                         else if (j == (pagesize - 4))
9629                                 nvram_cmd |= NVRAM_CMD_LAST;
9630
9631                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9632                                 break;
9633                 }
9634                 if (ret)
9635                         break;
9636         }
9637
9638         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9639         tg3_nvram_exec_cmd(tp, nvram_cmd);
9640
9641         kfree(tmp);
9642
9643         return ret;
9644 }
9645
9646 /* offset and length are dword aligned */
9647 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9648                 u8 *buf)
9649 {
9650         int i, ret = 0;
9651
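             /* Buffered parts take the data one dword at a time;
              * NVRAM_CMD_FIRST and NVRAM_CMD_LAST mark where each flash page
              * (and the transfer as a whole) begins and ends, and on chips
              * earlier than the 5752/5755/5787 an ST part also gets an
              * explicit write-enable command at the start of each page.
              */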
9652         for (i = 0; i < len; i += 4, offset += 4) {
9653                 u32 data, page_off, phy_addr, nvram_cmd;
9654
9655                 memcpy(&data, buf + i, 4);
9656                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9657
9658                 page_off = offset % tp->nvram_pagesize;
9659
9660                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9661
9662                 tw32(NVRAM_ADDR, phy_addr);
9663
9664                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9665
9666                 if ((page_off == 0) || (i == 0))
9667                         nvram_cmd |= NVRAM_CMD_FIRST;
9668                 if (page_off == (tp->nvram_pagesize - 4))
9669                         nvram_cmd |= NVRAM_CMD_LAST;
9670
9671                 if (i == (len - 4))
9672                         nvram_cmd |= NVRAM_CMD_LAST;
9673
9674                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9675                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9676                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9677                     (tp->nvram_jedecnum == JEDEC_ST) &&
9678                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9679
9680                         if ((ret = tg3_nvram_exec_cmd(tp,
9681                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9682                                 NVRAM_CMD_DONE)))
9683
9684                                 break;
9685                 }
9686                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9687                         /* We always do complete word writes to eeprom. */
9688                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9689                 }
9690
9691                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9692                         break;
9693         }
9694         return ret;
9695 }
9696
9697 /* offset and length are dword aligned */
9698 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9699 {
9700         int ret;
9701
9702         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9703                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9704                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9705                 udelay(40);
9706         }
9707
9708         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9709                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9710         }
9711         else {
9712                 u32 grc_mode;
9713
9714                 ret = tg3_nvram_lock(tp);
9715                 if (ret)
9716                         return ret;
9717
9718                 tg3_enable_nvram_access(tp);
9719                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9720                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9721                         tw32(NVRAM_WRITE1, 0x406);
9722
9723                 grc_mode = tr32(GRC_MODE);
9724                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9725
9726                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9727                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9728
9729                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9730                                 buf);
9731                 }
9732                 else {
9733                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9734                                 buf);
9735                 }
9736
9737                 grc_mode = tr32(GRC_MODE);
9738                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9739
9740                 tg3_disable_nvram_access(tp);
9741                 tg3_nvram_unlock(tp);
9742         }
9743
9744         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9745                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9746                 udelay(40);
9747         }
9748
9749         return ret;
9750 }
9751
9752 struct subsys_tbl_ent {
9753         u16 subsys_vendor, subsys_devid;
9754         u32 phy_id;
9755 };
9756
9757 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9758         /* Broadcom boards. */
9759         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9760         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9761         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9762         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9763         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9764         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9765         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9766         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9767         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9768         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9769         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9770
9771         /* 3com boards. */
9772         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9773         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9774         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9775         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9776         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9777
9778         /* DELL boards. */
9779         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9780         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9781         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9782         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9783
9784         /* Compaq boards. */
9785         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9786         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9787         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9788         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9789         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9790
9791         /* IBM boards. */
9792         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9793 };
9794
9795 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9796 {
9797         int i;
9798
9799         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9800                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9801                      tp->pdev->subsystem_vendor) &&
9802                     (subsys_id_to_phy_id[i].subsys_devid ==
9803                      tp->pdev->subsystem_device))
9804                         return &subsys_id_to_phy_id[i];
9805         }
9806         return NULL;
9807 }
9808
9809 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9810 {
9811         u32 val;
9812         u16 pmcsr;
9813
9814         /* On some early chips the SRAM cannot be accessed in D3hot state,
9815          * so we need to make sure we're in D0.
9816          */
9817         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9818         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9819         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9820         msleep(1);
9821
9822         /* Make sure register accesses (indirect or otherwise)
9823          * will function correctly.
9824          */
9825         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9826                                tp->misc_host_ctrl);
9827
9828         /* The memory arbiter has to be enabled in order for SRAM accesses
9829          * to succeed.  Normally on powerup the tg3 chip firmware will make
9830          * sure it is enabled, but other entities such as system netboot
9831          * code might disable it.
9832          */
9833         val = tr32(MEMARB_MODE);
9834         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9835
9836         tp->phy_id = PHY_ID_INVALID;
9837         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9838
9839         /* Assume an onboard device by default.  */
9840         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9841
9842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9843                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM))
9844                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9845                 return;
9846         }
9847
9848         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9849         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9850                 u32 nic_cfg, led_cfg;
9851                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9852                 int eeprom_phy_serdes = 0;
9853
9854                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9855                 tp->nic_sram_data_cfg = nic_cfg;
9856
9857                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9858                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9859                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9860                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9861                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9862                     (ver > 0) && (ver < 0x100))
9863                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9864
9865                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9866                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9867                         eeprom_phy_serdes = 1;
9868
9869                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9870                 if (nic_phy_id != 0) {
9871                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9872                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9873
9874                         eeprom_phy_id  = (id1 >> 16) << 10;
9875                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9876                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9877                 } else
9878                         eeprom_phy_id = 0;
9879
9880                 tp->phy_id = eeprom_phy_id;
9881                 if (eeprom_phy_serdes) {
9882                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9883                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9884                         else
9885                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9886                 }
9887
9888                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9889                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9890                                     SHASTA_EXT_LED_MODE_MASK);
9891                 else
9892                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9893
9894                 switch (led_cfg) {
9895                 default:
9896                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9897                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9898                         break;
9899
9900                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9901                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9902                         break;
9903
9904                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9905                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9906
9907                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9908                          * read on some older 5700/5701 bootcode.
9909                          */
9910                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9911                             ASIC_REV_5700 ||
9912                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9913                             ASIC_REV_5701)
9914                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9915
9916                         break;
9917
9918                 case SHASTA_EXT_LED_SHARED:
9919                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9920                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9921                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9922                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9923                                                  LED_CTRL_MODE_PHY_2);
9924                         break;
9925
9926                 case SHASTA_EXT_LED_MAC:
9927                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9928                         break;
9929
9930                 case SHASTA_EXT_LED_COMBO:
9931                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9932                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9933                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9934                                                  LED_CTRL_MODE_PHY_2);
9935                         break;
9936
9937                 }
9938
9939                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9940                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9941                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9942                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9943
9944                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9945                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9946                 else
9947                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9948
9949                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9950                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9951                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9952                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9953                 }
9954                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9955                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9956
9957                 if (cfg2 & (1 << 17))
9958                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9959
9960                 /* serdes signal pre-emphasis in register 0x590 is set
9961                  * by the bootcode if bit 18 is set.  */
9962                 if (cfg2 & (1 << 18))
9963                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9964         }
9965 }
9966
9967 static int __devinit tg3_phy_probe(struct tg3 *tp)
9968 {
9969         u32 hw_phy_id_1, hw_phy_id_2;
9970         u32 hw_phy_id, hw_phy_id_masked;
9971         int err;
9972
9973         /* Reading the PHY ID register can conflict with ASF
9974          * firmware access to the PHY hardware.
9975          */
9976         err = 0;
9977         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9978                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9979         } else {
9980                 /* Now read the physical PHY_ID from the chip and verify
9981                  * that it is sane.  If it doesn't look good, we fall back
9982                  * to the PHY_ID found in the eeprom area and, failing
9983                  * that, to the hard-coded subsys device table.
9984                  */
9985                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9986                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9987
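                     /* Pack MII_PHYSID1/2 into the driver's internal PHY_ID
                      * layout; this mirrors the encoding used for the ID read
                      * out of NVRAM in tg3_get_eeprom_hw_cfg().
                      */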
9988                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9989                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9990                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9991
9992                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9993         }
9994
9995         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9996                 tp->phy_id = hw_phy_id;
9997                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9998                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9999                 else
10000                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10001         } else {
10002                 if (tp->phy_id != PHY_ID_INVALID) {
10003                         /* Do nothing, phy ID already set up in
10004                          * tg3_get_eeprom_hw_cfg().
10005                          */
10006                 } else {
10007                         struct subsys_tbl_ent *p;
10008
10009                         /* No eeprom signature?  Try the hardcoded
10010                          * subsys device table.
10011                          */
10012                         p = lookup_by_subsys(tp);
10013                         if (!p)
10014                                 return -ENODEV;
10015
10016                         tp->phy_id = p->phy_id;
10017                         if (!tp->phy_id ||
10018                             tp->phy_id == PHY_ID_BCM8002)
10019                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10020                 }
10021         }
10022
10023         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10024             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10025                 u32 bmsr, adv_reg, tg3_ctrl;
10026
10027                 tg3_readphy(tp, MII_BMSR, &bmsr);
10028                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10029                     (bmsr & BMSR_LSTATUS))
10030                         goto skip_phy_reset;
10031
10032                 err = tg3_phy_reset(tp);
10033                 if (err)
10034                         return err;
10035
10036                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10037                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10038                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10039                 tg3_ctrl = 0;
10040                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10041                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10042                                     MII_TG3_CTRL_ADV_1000_FULL);
10043                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10044                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10045                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10046                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10047                 }
10048
10049                 if (!tg3_copper_is_advertising_all(tp)) {
10050                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10051
10052                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10053                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10054
10055                         tg3_writephy(tp, MII_BMCR,
10056                                      BMCR_ANENABLE | BMCR_ANRESTART);
10057                 }
10058                 tg3_phy_set_wirespeed(tp);
10059
10060                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10061                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10062                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10063         }
10064
10065 skip_phy_reset:
10066         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10067                 err = tg3_init_5401phy_dsp(tp);
10068                 if (err)
10069                         return err;
10070         }
10071
10072         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10073                 err = tg3_init_5401phy_dsp(tp);
10074         }
10075
10076         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10077                 tp->link_config.advertising =
10078                         (ADVERTISED_1000baseT_Half |
10079                          ADVERTISED_1000baseT_Full |
10080                          ADVERTISED_Autoneg |
10081                          ADVERTISED_FIBRE);
10082         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10083                 tp->link_config.advertising &=
10084                         ~(ADVERTISED_1000baseT_Half |
10085                           ADVERTISED_1000baseT_Full);
10086
10087         return err;
10088 }
10089
10090 static void __devinit tg3_read_partno(struct tg3 *tp)
10091 {
10092         unsigned char vpd_data[256];
10093         int i;
10094         u32 magic;
10095
10096         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10097                 goto out_not_found;
10098
10099         if (magic == TG3_EEPROM_MAGIC) {
10100                 for (i = 0; i < 256; i += 4) {
10101                         u32 tmp;
10102
10103                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10104                                 goto out_not_found;
10105
10106                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10107                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10108                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10109                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10110                 }
10111         } else {
10112                 int vpd_cap;
10113
10114                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10115                 for (i = 0; i < 256; i += 4) {
10116                         u32 tmp, j = 0;
10117                         u16 tmp16;
10118
10119                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10120                                               i);
10121                         while (j++ < 100) {
10122                                 pci_read_config_word(tp->pdev, vpd_cap +
10123                                                      PCI_VPD_ADDR, &tmp16);
10124                                 if (tmp16 & 0x8000)
10125                                         break;
10126                                 msleep(1);
10127                         }
10128                         if (!(tmp16 & 0x8000))
10129                                 goto out_not_found;
10130
10131                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10132                                               &tmp);
10133                         tmp = cpu_to_le32(tmp);
10134                         memcpy(&vpd_data[i], &tmp, 4);
10135                 }
10136         }
10137
10138         /* Now parse and find the part number. */
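              /* vpd_data[] holds standard PCI VPD: large-resource tags 0x82
               * (identifier string) and 0x91 (read/write area) are skipped,
               * and the part number is looked up under the "PN" keyword
               * inside the 0x90 read-only area.
               */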
10139         for (i = 0; i < 256; ) {
10140                 unsigned char val = vpd_data[i];
10141                 int block_end;
10142
10143                 if (val == 0x82 || val == 0x91) {
10144                         i = (i + 3 +
10145                              (vpd_data[i + 1] +
10146                               (vpd_data[i + 2] << 8)));
10147                         continue;
10148                 }
10149
10150                 if (val != 0x90)
10151                         goto out_not_found;
10152
10153                 block_end = (i + 3 +
10154                              (vpd_data[i + 1] +
10155                               (vpd_data[i + 2] << 8)));
10156                 i += 3;
10157                 while (i < block_end) {
10158                         if (vpd_data[i + 0] == 'P' &&
10159                             vpd_data[i + 1] == 'N') {
10160                                 int partno_len = vpd_data[i + 2];
10161
10162                                 if (partno_len > 24)
10163                                         goto out_not_found;
10164
10165                                 memcpy(tp->board_part_number,
10166                                        &vpd_data[i + 3],
10167                                        partno_len);
10168
10169                                 /* Success. */
10170                                 return;
10171                         }
                              i += 3 + vpd_data[i + 2];
10172                 }
10173
10174                 /* Part number not found. */
10175                 goto out_not_found;
10176         }
10177
10178 out_not_found:
10179         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10180                 strcpy(tp->board_part_number, "BCM95906");
10181         else
10182                 strcpy(tp->board_part_number, "none");
10183 }
10184
10185 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10186 {
10187         u32 val, offset, start;
10188
10189         if (tg3_nvram_read_swab(tp, 0, &val))
10190                 return;
10191
10192         if (val != TG3_EEPROM_MAGIC)
10193                 return;
10194
10195         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10196             tg3_nvram_read_swab(tp, 0x4, &start))
10197                 return;
10198
10199         offset = tg3_nvram_logical_addr(tp, offset);
10200         if (tg3_nvram_read_swab(tp, offset, &val))
10201                 return;
10202
10203         if ((val & 0xfc000000) == 0x0c000000) {
10204                 u32 ver_offset, addr;
10205                 int i;
10206
10207                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10208                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10209                         return;
10210
10211                 if (val != 0)
10212                         return;
10213
10214                 addr = offset + ver_offset - start;
10215                 for (i = 0; i < 16; i += 4) {
10216                         if (tg3_nvram_read(tp, addr + i, &val))
10217                                 return;
10218
10219                         val = cpu_to_le32(val);
10220                         memcpy(tp->fw_ver + i, &val, 4);
10221                 }
10222         }
10223 }
10224
10225 static int __devinit tg3_get_invariants(struct tg3 *tp)
10226 {
10227         static struct pci_device_id write_reorder_chipsets[] = {
10228                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10229                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10230                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10231                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10232                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10233                              PCI_DEVICE_ID_VIA_8385_0) },
10234                 { },
10235         };
10236         u32 misc_ctrl_reg;
10237         u32 cacheline_sz_reg;
10238         u32 pci_state_reg, grc_misc_cfg;
10239         u32 val;
10240         u16 pci_cmd;
10241         int err;
10242
10243         /* Force memory write invalidate off.  If we leave it on,
10244          * then on 5700_BX chips we have to enable a workaround.
10245          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10246          * to match the cacheline size.  The Broadcom driver has this
10247          * workaround but turns MWI off all the time, so it never uses
10248          * it.  This seems to suggest that the workaround is insufficient.
10249          */
10250         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10251         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10252         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10253
10254         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10255          * has the register indirect write enable bit set before
10256          * we try to access any of the MMIO registers.  It is also
10257          * critical that the PCI-X hw workaround situation is decided
10258          * before that as well.
10259          */
10260         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10261                               &misc_ctrl_reg);
10262
10263         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10264                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10265
10266         /* Wrong chip ID in 5752 A0. This code can be removed later
10267          * as A0 is not in production.
10268          */
10269         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10270                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10271
10272         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10273          * we need to disable memory and use config. cycles
10274          * only to access all registers. The 5702/03 chips
10275          * can mistakenly decode the special cycles from the
10276          * ICH chipsets as memory write cycles, causing corruption
10277          * of register and memory space. Only certain ICH bridges
10278          * will drive special cycles with non-zero data during the
10279          * address phase which can fall within the 5703's address
10280          * range. This is not an ICH bug as the PCI spec allows
10281          * non-zero address during special cycles. However, only
10282          * these ICH bridges are known to drive non-zero addresses
10283          * during special cycles.
10284          *
10285          * Since special cycles do not cross PCI bridges, we only
10286          * enable this workaround if the 5703 is on the secondary
10287          * bus of these ICH bridges.
10288          */
10289         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10290             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10291                 static struct tg3_dev_id {
10292                         u32     vendor;
10293                         u32     device;
10294                         u32     rev;
10295                 } ich_chipsets[] = {
10296                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10297                           PCI_ANY_ID },
10298                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10299                           PCI_ANY_ID },
10300                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10301                           0xa },
10302                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10303                           PCI_ANY_ID },
10304                         { },
10305                 };
10306                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10307                 struct pci_dev *bridge = NULL;
10308
10309                 while (pci_id->vendor != 0) {
10310                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10311                                                 bridge);
10312                         if (!bridge) {
10313                                 pci_id++;
10314                                 continue;
10315                         }
10316                         if (pci_id->rev != PCI_ANY_ID) {
10317                                 u8 rev;
10318
10319                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10320                                                      &rev);
10321                                 if (rev > pci_id->rev)
10322                                         continue;
10323                         }
10324                         if (bridge->subordinate &&
10325                             (bridge->subordinate->number ==
10326                              tp->pdev->bus->number)) {
10327
10328                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10329                                 pci_dev_put(bridge);
10330                                 break;
10331                         }
10332                 }
10333         }
10334
10335         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10336          * DMA addresses > 40-bit. This bridge may have other additional
10337          * 57xx devices behind it in some 4-port NIC designs for example.
10338          * Any tg3 device found behind the bridge will also need the 40-bit
10339          * DMA workaround.
10340          */
10341         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10342             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10343                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10344                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10345                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10346         }
10347         else {
10348                 struct pci_dev *bridge = NULL;
10349
10350                 do {
10351                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10352                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10353                                                 bridge);
10354                         if (bridge && bridge->subordinate &&
10355                             (bridge->subordinate->number <=
10356                              tp->pdev->bus->number) &&
10357                             (bridge->subordinate->subordinate >=
10358                              tp->pdev->bus->number)) {
10359                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10360                                 pci_dev_put(bridge);
10361                                 break;
10362                         }
10363                 } while (bridge);
10364         }
10365
10366         /* Initialize misc host control in PCI block. */
10367         tp->misc_host_ctrl |= (misc_ctrl_reg &
10368                                MISC_HOST_CTRL_CHIPREV);
10369         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10370                                tp->misc_host_ctrl);
10371
10372         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10373                               &cacheline_sz_reg);
10374
10375         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10376         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10377         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10378         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10379
10380         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10381             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10382             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10383             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10384             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10385             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10386                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10387
10388         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10389             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10390                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10391
10392         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10393                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10394                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10395                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10396                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10397                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10398                 } else {
10399                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10400                                           TG3_FLG2_HW_TSO_1_BUG;
10401                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10402                                 ASIC_REV_5750 &&
10403                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10404                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10405                 }
10406         }
10407
10408         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10409             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10410             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10411             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10412             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10413             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10414                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10415
10416         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10417                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10418
10419         /* If we have an AMD 762 or VIA K8T800 chipset, write
10420          * reordering to the mailbox registers done by the host
10421          * controller can cause major troubles.  We read back from
10422          * every mailbox register write to force the writes to be
10423          * posted to the chip in order.
10424          */
10425         if (pci_dev_present(write_reorder_chipsets) &&
10426             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10427                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10428
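              /* The 5703 gets a PCI latency timer of at least 64.  If the boot
               * firmware programmed a smaller value, rebuild the cache line
               * size config dword with the new timer and write it back.
               */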
10429         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10430             tp->pci_lat_timer < 64) {
10431                 tp->pci_lat_timer = 64;
10432
10433                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10434                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10435                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10436                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10437
10438                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10439                                        cacheline_sz_reg);
10440         }
10441
10442         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10443                               &pci_state_reg);
10444
10445         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10446                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10447
10448                 /* If this is a 5700 BX chipset, and we are in PCI-X
10449                  * mode, enable register write workaround.
10450                  *
10451                  * The workaround is to use indirect register accesses
10452                  * for all chip writes not to mailbox registers.
10453                  */
10454                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10455                         u32 pm_reg;
10456                         u16 pci_cmd;
10457
10458                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10459
10460                         /* The chip can have its power management PCI config
10461                          * space registers clobbered due to this bug.
10462                          * So explicitly force the chip into D0 here.
10463                          */
10464                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10465                                               &pm_reg);
10466                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10467                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10468                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10469                                                pm_reg);
10470
10471                         /* Also, force SERR#/PERR# in PCI command. */
10472                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10473                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10474                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10475                 }
10476         }
10477
10478         /* 5700 BX chips need to have their TX producer index mailboxes
10479          * written twice to workaround a bug.
10480          */
10481         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10482                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10483
10484         /* Back to back register writes can cause problems on this chip,
10485          * the workaround is to read back all reg writes except those to
10486          * mailbox regs.  See tg3_write_indirect_reg32().
10487          *
10488          * PCI Express 5750_A0 rev chips need this workaround too.
10489          */
10490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10491             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10492              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10493                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10494
10495         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10496                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10497         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10498                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10499
10500         /* Chip-specific fixup from Broadcom driver */
10501         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10502             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10503                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10504                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10505         }
10506
10507         /* Default fast path register access methods */
10508         tp->read32 = tg3_read32;
10509         tp->write32 = tg3_write32;
10510         tp->read32_mbox = tg3_read32;
10511         tp->write32_mbox = tg3_write32;
10512         tp->write32_tx_mbox = tg3_write32;
10513         tp->write32_rx_mbox = tg3_write32;
10514
10515         /* Various workaround register access methods */
10516         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10517                 tp->write32 = tg3_write_indirect_reg32;
10518         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10519                 tp->write32 = tg3_write_flush_reg32;
10520
10521         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10522             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10523                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10524                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10525                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10526         }
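              /* The flush variant drains the posted write by reading the same
               * register straight back, roughly:
               *
               *      writel(val, tp->regs + off);
               *      readl(tp->regs + off);
               *
               * which is essentially what tg3_write_flush_reg32() does.  The
               * indirect variants instead go through PCI config space under
               * tp->indirect_lock rather than the memory-mapped window.
               */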
10527
10528         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10529                 tp->read32 = tg3_read_indirect_reg32;
10530                 tp->write32 = tg3_write_indirect_reg32;
10531                 tp->read32_mbox = tg3_read_indirect_mbox;
10532                 tp->write32_mbox = tg3_write_indirect_mbox;
10533                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10534                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10535
10536                 iounmap(tp->regs);
10537                 tp->regs = NULL;
10538
10539                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10540                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10541                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10542         }
10543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10544                 tp->read32_mbox = tg3_read32_mbox_5906;
10545                 tp->write32_mbox = tg3_write32_mbox_5906;
10546                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10547                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10548         }
10549
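              /* Chips forced onto the indirect register path, and 5700/5701
               * when running in PCI-X mode, also access NIC SRAM through the
               * PCI config-space memory window rather than the mapped
               * aperture.
               */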
10550         if (tp->write32 == tg3_write_indirect_reg32 ||
10551             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10552              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10553               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10554                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10555
10556         /* Get eeprom hw config before calling tg3_set_power_state().
10557          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10558          * determined before calling tg3_set_power_state() so that
10559          * we know whether or not to switch out of Vaux power.
10560          * When the flag is set, it means that GPIO1 is used for eeprom
10561          * write protect and also implies that it is a LOM where GPIOs
10562          * are not used to switch power.
10563          */
10564         tg3_get_eeprom_hw_cfg(tp);
10565
10566         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10567          * GPIO1 driven high will bring 5700's external PHY out of reset.
10568          * It is also used as eeprom write protect on LOMs.
10569          */
10570         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10571         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10572             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10573                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10574                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10575         /* Unused GPIO3 must be driven as output on 5752 because there
10576          * are no pull-up resistors on unused GPIO pins.
10577          */
10578         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10579                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10580
10581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10582                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10583
10584         /* Force the chip into D0. */
10585         err = tg3_set_power_state(tp, PCI_D0);
10586         if (err) {
10587                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10588                        pci_name(tp->pdev));
10589                 return err;
10590         }
10591
10592         /* 5700 B0 chips do not support checksumming correctly due
10593          * to hardware bugs.
10594          */
10595         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10596                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10597
10598         /* Derive initial jumbo mode from MTU assigned in
10599          * ether_setup() via the alloc_etherdev() call
10600          */
10601         if (tp->dev->mtu > ETH_DATA_LEN &&
10602             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10603                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10604
10605         /* Determine WakeOnLan speed to use. */
10606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10607             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10608             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10609             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10610                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10611         } else {
10612                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10613         }
10614
10615         /* A few boards don't want Ethernet@WireSpeed phy feature */
10616         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10617             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10618              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10619              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10620             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10621             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10622                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10623
10624         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10625             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10626                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10627         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10628                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10629
10630         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10631                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10632                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10633                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10634                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10635                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10636         }
10637
10638         tp->coalesce_mode = 0;
10639         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10640             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10641                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10642
10643         /* Initialize MAC MI mode, polling disabled. */
10644         tw32_f(MAC_MI_MODE, tp->mi_mode);
10645         udelay(80);
10646
10647         /* Initialize data/descriptor byte/word swapping. */
10648         val = tr32(GRC_MODE);
10649         val &= GRC_MODE_HOST_STACKUP;
10650         tw32(GRC_MODE, val | tp->grc_mode);
10651
10652         tg3_switch_clocks(tp);
10653
10654         /* Clear this out for sanity. */
10655         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10656
10657         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10658                               &pci_state_reg);
10659         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10660             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10661                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10662
10663                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10664                     chiprevid == CHIPREV_ID_5701_B0 ||
10665                     chiprevid == CHIPREV_ID_5701_B2 ||
10666                     chiprevid == CHIPREV_ID_5701_B5) {
10667                         void __iomem *sram_base;
10668
10669                         /* Write some dummy words into the SRAM status block
10670                          * area and see if it reads back correctly.  If the return
10671                          * value is bad, force enable the PCIX workaround.
10672                          */
10673                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10674
10675                         writel(0x00000000, sram_base);
10676                         writel(0x00000000, sram_base + 4);
10677                         writel(0xffffffff, sram_base + 4);
10678                         if (readl(sram_base) != 0x00000000)
10679                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10680                 }
10681         }
10682
10683         udelay(50);
10684         tg3_nvram_init(tp);
10685
10686         grc_misc_cfg = tr32(GRC_MISC_CFG);
10687         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10688
10689         /* Broadcom's driver says that CIOBE multisplit has a bug */
10690 #if 0
10691         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10692             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10693                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10694                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10695         }
10696 #endif
10697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10698             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10699              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10700                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10701
10702         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10703             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10704                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10705         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10706                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10707                                       HOSTCC_MODE_CLRTICK_TXBD);
10708
10709                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10710                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10711                                        tp->misc_host_ctrl);
10712         }
10713
10714         /* these are limited to 10/100 only */
10715         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10716              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10717             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10718              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10719              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10720               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10721               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10722             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10723              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10724               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)) ||
10725             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10726                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10727
10728         err = tg3_phy_probe(tp);
10729         if (err) {
10730                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10731                        pci_name(tp->pdev), err);
10732                 /* ... but do not return immediately ... */
10733         }
10734
10735         tg3_read_partno(tp);
10736         tg3_read_fw_ver(tp);
10737
10738         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10739                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10740         } else {
10741                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10742                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10743                 else
10744                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10745         }
10746
10747         /* 5700 {AX,BX} chips have a broken status block link
10748          * change bit implementation, so we must use the
10749          * status register in those cases.
10750          */
10751         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10752                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10753         else
10754                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10755
10756         /* The led_ctrl is set during tg3_phy_probe; here we might
10757          * have to force the link status polling mechanism based
10758          * upon subsystem IDs.
10759          */
10760         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10761             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10762                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10763                                   TG3_FLAG_USE_LINKCHG_REG);
10764         }
10765
10766         /* For all SERDES we poll the MAC status register. */
10767         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10768                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10769         else
10770                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10771
10772         /* All chips before 5787 can get confused if TX buffers
10773          * straddle the 4GB address boundary in some cases.
10774          */
10775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10776             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10777             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10778                 tp->dev->hard_start_xmit = tg3_start_xmit;
10779         else
10780                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10781
10782         tp->rx_offset = 2;
10783         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10784             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10785                 tp->rx_offset = 0;
10786
10787         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10788
10789         /* Increment the rx prod index on the rx std ring by at most
10790          * 8 for these chips to workaround hw errata.
10791          */
10792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10793             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10794             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10795                 tp->rx_std_max_post = 8;
10796
10797         /* By default, disable wake-on-lan.  User can change this
10798          * using ETHTOOL_SWOL.
10799          */
10800         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10801
10802         return err;
10803 }
10804
10805 #ifdef CONFIG_SPARC64
10806 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10807 {
10808         struct net_device *dev = tp->dev;
10809         struct pci_dev *pdev = tp->pdev;
10810         struct pcidev_cookie *pcp = pdev->sysdata;
10811
10812         if (pcp != NULL) {
10813                 unsigned char *addr;
10814                 int len;
10815
10816                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10817                                         &len);
10818                 if (addr && len == 6) {
10819                         memcpy(dev->dev_addr, addr, 6);
10820                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10821                         return 0;
10822                 }
10823         }
10824         return -ENODEV;
10825 }
10826
10827 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10828 {
10829         struct net_device *dev = tp->dev;
10830
10831         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10832         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10833         return 0;
10834 }
10835 #endif
10836
10837 static int __devinit tg3_get_device_address(struct tg3 *tp)
10838 {
10839         struct net_device *dev = tp->dev;
10840         u32 hi, lo, mac_offset;
10841         int addr_ok = 0;
10842
10843 #ifdef CONFIG_SPARC64
10844         if (!tg3_get_macaddr_sparc(tp))
10845                 return 0;
10846 #endif
10847
10848         mac_offset = 0x7c;
10849         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10850             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10851                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10852                         mac_offset = 0xcc;
10853                 if (tg3_nvram_lock(tp))
10854                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10855                 else
10856                         tg3_nvram_unlock(tp);
10857         }
10858         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10859                 mac_offset = 0x10;
10860
10861         /* First try to get it from MAC address mailbox. */
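              /* A valid address is tagged by the bootcode with 0x484b ("HK")
               * in the upper 16 bits of the high word.
               */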
10862         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10863         if ((hi >> 16) == 0x484b) {
10864                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10865                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10866
10867                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10868                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10869                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10870                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10871                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10872
10873                 /* Some old bootcode may report a 0 MAC address in SRAM */
10874                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10875         }
10876         if (!addr_ok) {
10877                 /* Next, try NVRAM. */
10878                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10879                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10880                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10881                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10882                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10883                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10884                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10885                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10886                 }
10887                 /* Finally just fetch it out of the MAC control regs. */
10888                 else {
10889                         hi = tr32(MAC_ADDR_0_HIGH);
10890                         lo = tr32(MAC_ADDR_0_LOW);
10891
10892                         dev->dev_addr[5] = lo & 0xff;
10893                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10894                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10895                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10896                         dev->dev_addr[1] = hi & 0xff;
10897                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10898                 }
10899         }
10900
10901         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10902 #ifdef CONFIG_SPARC64
10903                 if (!tg3_get_default_macaddr_sparc(tp))
10904                         return 0;
10905 #endif
10906                 return -EINVAL;
10907         }
10908         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10909         return 0;
10910 }
10911
10912 #define BOUNDARY_SINGLE_CACHELINE       1
10913 #define BOUNDARY_MULTI_CACHELINE        2
10914
10915 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10916 {
10917         int cacheline_size;
10918         u8 byte;
10919         int goal;
10920
10921         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
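              /* The PCI cache line size register counts 32-bit words, hence
               * the multiply by four below; an unprogrammed value of zero is
               * treated as the 1024-byte worst case.
               */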
10922         if (byte == 0)
10923                 cacheline_size = 1024;
10924         else
10925                 cacheline_size = (int) byte * 4;
10926
10927         /* On 5703 and later chips, the boundary bits have no
10928          * effect.
10929          */
10930         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10931             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10932             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10933                 goto out;
10934
10935 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10936         goal = BOUNDARY_MULTI_CACHELINE;
10937 #else
10938 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10939         goal = BOUNDARY_SINGLE_CACHELINE;
10940 #else
10941         goal = 0;
10942 #endif
10943 #endif
10944
10945         if (!goal)
10946                 goto out;
10947
10948         /* PCI controllers on most RISC systems tend to disconnect
10949          * when a device tries to burst across a cache-line boundary.
10950          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10951          *
10952          * Unfortunately, for PCI-E there are only limited
10953          * write-side controls for this, and thus for reads
10954          * we will still get the disconnects.  We'll also waste
10955          * these PCI cycles for both read and write for chips
10956          * other than 5700 and 5701 which do not implement the
10957          * boundary bits.
10958          */
10959         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10960             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10961                 switch (cacheline_size) {
10962                 case 16:
10963                 case 32:
10964                 case 64:
10965                 case 128:
10966                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10967                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10968                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10969                         } else {
10970                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10971                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10972                         }
10973                         break;
10974
10975                 case 256:
10976                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10977                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10978                         break;
10979
10980                 default:
10981                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10982                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10983                         break;
10984                 }
10985         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10986                 switch (cacheline_size) {
10987                 case 16:
10988                 case 32:
10989                 case 64:
10990                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10991                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10992                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10993                                 break;
10994                         }
10995                         /* fallthrough */
10996                 case 128:
10997                 default:
10998                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10999                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11000                         break;
11001                 }
11002         } else {
11003                 switch (cacheline_size) {
11004                 case 16:
11005                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11006                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11007                                         DMA_RWCTRL_WRITE_BNDRY_16);
11008                                 break;
11009                         }
11010                         /* fallthrough */
11011                 case 32:
11012                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11013                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11014                                         DMA_RWCTRL_WRITE_BNDRY_32);
11015                                 break;
11016                         }
11017                         /* fallthrough */
11018                 case 64:
11019                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11020                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11021                                         DMA_RWCTRL_WRITE_BNDRY_64);
11022                                 break;
11023                         }
11024                         /* fallthrough */
11025                 case 128:
11026                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11027                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11028                                         DMA_RWCTRL_WRITE_BNDRY_128);
11029                                 break;
11030                         }
11031                         /* fallthrough */
11032                 case 256:
11033                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11034                                 DMA_RWCTRL_WRITE_BNDRY_256);
11035                         break;
11036                 case 512:
11037                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11038                                 DMA_RWCTRL_WRITE_BNDRY_512);
11039                         break;
11040                 case 1024:
11041                 default:
11042                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11043                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11044                         break;
11045                 }
11046         }
11047
11048 out:
11049         return val;
11050 }
11051
11052 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11053 {
11054         struct tg3_internal_buffer_desc test_desc;
11055         u32 sram_dma_descs;
11056         int i, ret;
11057
11058         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11059
11060         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11061         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11062         tw32(RDMAC_STATUS, 0);
11063         tw32(WDMAC_STATUS, 0);
11064
11065         tw32(BUFMGR_MODE, 0);
11066         tw32(FTQ_RESET, 0);
11067
11068         test_desc.addr_hi = ((u64) buf_dma) >> 32;
11069         test_desc.addr_lo = buf_dma & 0xffffffff;
11070         test_desc.nic_mbuf = 0x00002100;
11071         test_desc.len = size;
11072
11073         /*
11074          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
11075          * the *second* time the tg3 driver was getting loaded after an
11076          * initial scan.
11077          *
11078          * Broadcom tells me:
11079          *   ...the DMA engine is connected to the GRC block and a DMA
11080          *   reset may affect the GRC block in some unpredictable way...
11081          *   The behavior of resets to individual blocks has not been tested.
11082          *
11083          * Broadcom noted the GRC reset will also reset all sub-components.
11084          */
11085         if (to_device) {
11086                 test_desc.cqid_sqid = (13 << 8) | 2;
11087
11088                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11089                 udelay(40);
11090         } else {
11091                 test_desc.cqid_sqid = (16 << 8) | 7;
11092
11093                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11094                 udelay(40);
11095         }
11096         test_desc.flags = 0x00000005;
11097
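              /* Copy the test descriptor into NIC SRAM one 32-bit word at a
               * time through the PCI config-space memory window.
               */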
11098         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11099                 u32 val;
11100
11101                 val = *(((u32 *)&test_desc) + i);
11102                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11103                                        sram_dma_descs + (i * sizeof(u32)));
11104                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11105         }
11106         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11107
11108         if (to_device) {
11109                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11110         } else {
11111                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11112         }
11113
11114         ret = -ENODEV;
11115         for (i = 0; i < 40; i++) {
11116                 u32 val;
11117
11118                 if (to_device)
11119                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11120                 else
11121                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11122                 if ((val & 0xffff) == sram_dma_descs) {
11123                         ret = 0;
11124                         break;
11125                 }
11126
11127                 udelay(100);
11128         }
11129
11130         return ret;
11131 }
11132
11133 #define TEST_BUFFER_SIZE        0x2000
11134
11135 static int __devinit tg3_test_dma(struct tg3 *tp)
11136 {
11137         dma_addr_t buf_dma;
11138         u32 *buf, saved_dma_rwctrl;
11139         int ret;
11140
11141         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11142         if (!buf) {
11143                 ret = -ENOMEM;
11144                 goto out_nofree;
11145         }
11146
11147         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11148                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11149
11150         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11151
11152         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11153                 /* DMA read watermark not used on PCIE */
11154                 tp->dma_rwctrl |= 0x00180000;
11155         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11156                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11157                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11158                         tp->dma_rwctrl |= 0x003f0000;
11159                 else
11160                         tp->dma_rwctrl |= 0x003f000f;
11161         } else {
11162                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11163                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11164                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11165
11166                         /* If the 5704 is behind the EPB bridge, we can
11167                          * do the less restrictive ONE_DMA workaround for
11168                          * better performance.
11169                          */
11170                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11171                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11172                                 tp->dma_rwctrl |= 0x8000;
11173                         else if (ccval == 0x6 || ccval == 0x7)
11174                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11175
11176                         /* Set bit 23 to enable PCIX hw bug fix */
11177                         tp->dma_rwctrl |= 0x009f0000;
11178                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11179                         /* 5780 always in PCIX mode */
11180                         tp->dma_rwctrl |= 0x00144000;
11181                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11182                         /* 5714 always in PCIX mode */
11183                         tp->dma_rwctrl |= 0x00148000;
11184                 } else {
11185                         tp->dma_rwctrl |= 0x001b000f;
11186                 }
11187         }
11188
11189         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11190             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11191                 tp->dma_rwctrl &= 0xfffffff0;
11192
11193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11194             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11195                 /* Remove this if it causes problems for some boards. */
11196                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11197
11198                 /* On 5700/5701 chips, we need to set this bit.
11199                  * Otherwise the chip will issue cacheline transactions
11200                  * to streamable DMA memory without all of the byte
11201                  * enables turned on.  This is an error on several
11202                  * RISC PCI controllers, in particular sparc64.
11203                  *
11204                  * On 5703/5704 chips, this bit has been reassigned
11205                  * a different meaning.  In particular, it is used
11206                  * on those chips to enable a PCI-X workaround.
11207                  */
11208                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11209         }
11210
11211         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11212
11213 #if 0
11214         /* Unneeded, already done by tg3_get_invariants.  */
11215         tg3_switch_clocks(tp);
11216 #endif
11217
11218         ret = 0;
11219         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11220             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11221                 goto out;
11222
11223         /* It is best to perform DMA test with maximum write burst size
11224          * to expose the 5700/5701 write DMA bug.
11225          */
11226         saved_dma_rwctrl = tp->dma_rwctrl;
11227         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11228         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11229
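              /* Write the test pattern to the chip, read it back and verify.
               * On a mismatch, drop the write boundary to 16 bytes and rerun
               * the pass; a mismatch with the 16-byte boundary already in
               * place is a hard failure.
               */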
11230         while (1) {
11231                 u32 *p = buf, i;
11232
11233                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11234                         p[i] = i;
11235
11236                 /* Send the buffer to the chip. */
11237                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11238                 if (ret) {
11239                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11240                         break;
11241                 }
11242
11243 #if 0
11244                 /* validate data reached card RAM correctly. */
11245                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11246                         u32 val;
11247                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11248                         if (le32_to_cpu(val) != p[i]) {
11249                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11250                                 /* ret = -ENODEV here? */
11251                         }
11252                         p[i] = 0;
11253                 }
11254 #endif
11255                 /* Now read it back. */
11256                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11257                 if (ret) {
11258                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11259
11260                         break;
11261                 }
11262
11263                 /* Verify it. */
11264                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11265                         if (p[i] == i)
11266                                 continue;
11267
11268                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11269                             DMA_RWCTRL_WRITE_BNDRY_16) {
11270                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11271                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11272                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11273                                 break;
11274                         } else {
11275                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11276                                 ret = -ENODEV;
11277                                 goto out;
11278                         }
11279                 }
11280
11281                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11282                         /* Success. */
11283                         ret = 0;
11284                         break;
11285                 }
11286         }
11287         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11288             DMA_RWCTRL_WRITE_BNDRY_16) {
11289                 static struct pci_device_id dma_wait_state_chipsets[] = {
11290                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11291                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11292                         { },
11293                 };
11294
11295                 /* DMA test passed without adjusting DMA boundary,
11296                  * now look for chipsets that are known to expose the
11297                  * DMA bug without failing the test.
11298                  */
11299                 if (pci_dev_present(dma_wait_state_chipsets)) {
11300                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11301                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11302                 }
11303                 else
11304                         /* Safe to use the calculated DMA boundary. */
11305                         tp->dma_rwctrl = saved_dma_rwctrl;
11306
11307                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11308         }
11309
11310 out:
11311         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11312 out_nofree:
11313         return ret;
11314 }
11315
11316 static void __devinit tg3_init_link_config(struct tg3 *tp)
11317 {
11318         tp->link_config.advertising =
11319                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11320                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11321                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11322                  ADVERTISED_Autoneg | ADVERTISED_MII);
11323         tp->link_config.speed = SPEED_INVALID;
11324         tp->link_config.duplex = DUPLEX_INVALID;
11325         tp->link_config.autoneg = AUTONEG_ENABLE;
11326         tp->link_config.active_speed = SPEED_INVALID;
11327         tp->link_config.active_duplex = DUPLEX_INVALID;
11328         tp->link_config.phy_is_low_power = 0;
11329         tp->link_config.orig_speed = SPEED_INVALID;
11330         tp->link_config.orig_duplex = DUPLEX_INVALID;
11331         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11332 }
11333
11334 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11335 {
11336         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11337                 tp->bufmgr_config.mbuf_read_dma_low_water =
11338                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11339                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11340                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11341                 tp->bufmgr_config.mbuf_high_water =
11342                         DEFAULT_MB_HIGH_WATER_5705;
11343                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11344                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11345                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11346                         tp->bufmgr_config.mbuf_high_water =
11347                                 DEFAULT_MB_HIGH_WATER_5906;
11348                 }
11349
11350                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11351                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11352                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11353                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11354                 tp->bufmgr_config.mbuf_high_water_jumbo =
11355                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11356         } else {
11357                 tp->bufmgr_config.mbuf_read_dma_low_water =
11358                         DEFAULT_MB_RDMA_LOW_WATER;
11359                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11360                         DEFAULT_MB_MACRX_LOW_WATER;
11361                 tp->bufmgr_config.mbuf_high_water =
11362                         DEFAULT_MB_HIGH_WATER;
11363
11364                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11365                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11366                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11367                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11368                 tp->bufmgr_config.mbuf_high_water_jumbo =
11369                         DEFAULT_MB_HIGH_WATER_JUMBO;
11370         }
11371
11372         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11373         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11374 }
11375
11376 static char * __devinit tg3_phy_string(struct tg3 *tp)
11377 {
11378         switch (tp->phy_id & PHY_ID_MASK) {
11379         case PHY_ID_BCM5400:    return "5400";
11380         case PHY_ID_BCM5401:    return "5401";
11381         case PHY_ID_BCM5411:    return "5411";
11382         case PHY_ID_BCM5701:    return "5701";
11383         case PHY_ID_BCM5703:    return "5703";
11384         case PHY_ID_BCM5704:    return "5704";
11385         case PHY_ID_BCM5705:    return "5705";
11386         case PHY_ID_BCM5750:    return "5750";
11387         case PHY_ID_BCM5752:    return "5752";
11388         case PHY_ID_BCM5714:    return "5714";
11389         case PHY_ID_BCM5780:    return "5780";
11390         case PHY_ID_BCM5755:    return "5755";
11391         case PHY_ID_BCM5787:    return "5787";
11392         case PHY_ID_BCM5756:    return "5722/5756";
11393         case PHY_ID_BCM5906:    return "5906";
11394         case PHY_ID_BCM8002:    return "8002/serdes";
11395         case 0:                 return "serdes";
11396         default:                return "unknown";
11397         }
11398 }
11399
11400 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11401 {
11402         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11403                 strcpy(str, "PCI Express");
11404                 return str;
11405         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11406                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11407
11408                 strcpy(str, "PCIX:");
11409
11410                 if ((clock_ctrl == 7) ||
11411                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11412                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11413                         strcat(str, "133MHz");
11414                 else if (clock_ctrl == 0)
11415                         strcat(str, "33MHz");
11416                 else if (clock_ctrl == 2)
11417                         strcat(str, "50MHz");
11418                 else if (clock_ctrl == 4)
11419                         strcat(str, "66MHz");
11420                 else if (clock_ctrl == 6)
11421                         strcat(str, "100MHz");
11422         } else {
11423                 strcpy(str, "PCI:");
11424                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11425                         strcat(str, "66MHz");
11426                 else
11427                         strcat(str, "33MHz");
11428         }
11429         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11430                 strcat(str, ":32-bit");
11431         else
11432                 strcat(str, ":64-bit");
11433         return str;
11434 }
11435
11436 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11437 {
11438         struct pci_dev *peer;
11439         unsigned int func, devnr = tp->pdev->devfn & ~7;
11440
11441         for (func = 0; func < 8; func++) {
11442                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11443                 if (peer && peer != tp->pdev)
11444                         break;
11445                 pci_dev_put(peer);
11446         }
11447         /* 5704 can be configured in single-port mode, set peer to
11448          * tp->pdev in that case.
11449          */
11450         if (!peer) {
11451                 peer = tp->pdev;
11452                 return peer;
11453         }
11454
11455         /*
11456          * We don't need to keep the refcount elevated; there's no way
11457          * to remove one half of this device without removing the other
11458          */
11459         pci_dev_put(peer);
11460
11461         return peer;
11462 }
11463
11464 static void __devinit tg3_init_coal(struct tg3 *tp)
11465 {
11466         struct ethtool_coalesce *ec = &tp->coal;
11467
11468         memset(ec, 0, sizeof(*ec));
11469         ec->cmd = ETHTOOL_GCOALESCE;
11470         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11471         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11472         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11473         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11474         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11475         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11476         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11477         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11478         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11479
11480         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11481                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11482                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11483                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11484                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11485                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11486         }
11487
11488         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11489                 ec->rx_coalesce_usecs_irq = 0;
11490                 ec->tx_coalesce_usecs_irq = 0;
11491                 ec->stats_block_coalesce_usecs = 0;
11492         }
11493 }
11494
11495 static int __devinit tg3_init_one(struct pci_dev *pdev,
11496                                   const struct pci_device_id *ent)
11497 {
11498         static int tg3_version_printed = 0;
11499         unsigned long tg3reg_base, tg3reg_len;
11500         struct net_device *dev;
11501         struct tg3 *tp;
11502         int i, err, pm_cap;
11503         char str[40];
11504         u64 dma_mask, persist_dma_mask;
11505
11506         if (tg3_version_printed++ == 0)
11507                 printk(KERN_INFO "%s", version);
11508
11509         err = pci_enable_device(pdev);
11510         if (err) {
11511                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11512                        "aborting.\n");
11513                 return err;
11514         }
11515
11516         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11517                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11518                        "base address, aborting.\n");
11519                 err = -ENODEV;
11520                 goto err_out_disable_pdev;
11521         }
11522
11523         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11524         if (err) {
11525                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11526                        "aborting.\n");
11527                 goto err_out_disable_pdev;
11528         }
11529
11530         pci_set_master(pdev);
11531
11532         /* Find power-management capability. */
11533         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11534         if (pm_cap == 0) {
11535                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11536                        "aborting.\n");
11537                 err = -EIO;
11538                 goto err_out_free_res;
11539         }
11540
11541         tg3reg_base = pci_resource_start(pdev, 0);
11542         tg3reg_len = pci_resource_len(pdev, 0);
11543
11544         dev = alloc_etherdev(sizeof(*tp));
11545         if (!dev) {
11546                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11547                 err = -ENOMEM;
11548                 goto err_out_free_res;
11549         }
11550
11551         SET_MODULE_OWNER(dev);
11552         SET_NETDEV_DEV(dev, &pdev->dev);
11553
11554 #if TG3_VLAN_TAG_USED
11555         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11556         dev->vlan_rx_register = tg3_vlan_rx_register;
11557         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11558 #endif
11559
11560         tp = netdev_priv(dev);
11561         tp->pdev = pdev;
11562         tp->dev = dev;
11563         tp->pm_cap = pm_cap;
11564         tp->mac_mode = TG3_DEF_MAC_MODE;
11565         tp->rx_mode = TG3_DEF_RX_MODE;
11566         tp->tx_mode = TG3_DEF_TX_MODE;
11567         tp->mi_mode = MAC_MI_MODE_BASE;
11568         if (tg3_debug > 0)
11569                 tp->msg_enable = tg3_debug;
11570         else
11571                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11572
11573         /* The word/byte swap controls here control register access byte
11574          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11575          * setting below.
11576          */
11577         tp->misc_host_ctrl =
11578                 MISC_HOST_CTRL_MASK_PCI_INT |
11579                 MISC_HOST_CTRL_WORD_SWAP |
11580                 MISC_HOST_CTRL_INDIR_ACCESS |
11581                 MISC_HOST_CTRL_PCISTATE_RW;
11582
11583         /* The NONFRM (non-frame) byte/word swap controls take effect
11584          * on descriptor entries, anything which isn't packet data.
11585          *
11586          * The StrongARM chips on the board (one for tx, one for rx)
11587          * are running in big-endian mode.
11588          */
11589         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11590                         GRC_MODE_WSWAP_NONFRM_DATA);
11591 #ifdef __BIG_ENDIAN
11592         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11593 #endif
11594         spin_lock_init(&tp->lock);
11595         spin_lock_init(&tp->indirect_lock);
11596         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11597
11598         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11599         if (!tp->regs) {
11600                 printk(KERN_ERR PFX "Cannot map device registers, "
11601                        "aborting.\n");
11602                 err = -ENOMEM;
11603                 goto err_out_free_dev;
11604         }
11605
11606         tg3_init_link_config(tp);
11607
11608         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11609         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11610         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11611
11612         dev->open = tg3_open;
11613         dev->stop = tg3_close;
11614         dev->get_stats = tg3_get_stats;
11615         dev->set_multicast_list = tg3_set_rx_mode;
11616         dev->set_mac_address = tg3_set_mac_addr;
11617         dev->do_ioctl = tg3_ioctl;
11618         dev->tx_timeout = tg3_tx_timeout;
11619         dev->poll = tg3_poll;
11620         dev->ethtool_ops = &tg3_ethtool_ops;
11621         dev->weight = 64;
11622         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11623         dev->change_mtu = tg3_change_mtu;
11624         dev->irq = pdev->irq;
11625 #ifdef CONFIG_NET_POLL_CONTROLLER
11626         dev->poll_controller = tg3_poll_controller;
11627 #endif
11628
11629         err = tg3_get_invariants(tp);
11630         if (err) {
11631                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11632                        "aborting.\n");
11633                 goto err_out_iounmap;
11634         }
11635
11636         /* The EPB bridge inside 5714, 5715, and 5780 and any
11637          * device behind the EPB cannot support DMA addresses > 40-bit.
11638          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11639          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11640          * do DMA address check in tg3_start_xmit().
11641          */
11642         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11643                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11644         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11645                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11646 #ifdef CONFIG_HIGHMEM
11647                 dma_mask = DMA_64BIT_MASK;
11648 #endif
11649         } else
11650                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11651
11652         /* Configure DMA attributes. */
11653         if (dma_mask > DMA_32BIT_MASK) {
11654                 err = pci_set_dma_mask(pdev, dma_mask);
11655                 if (!err) {
11656                         dev->features |= NETIF_F_HIGHDMA;
11657                         err = pci_set_consistent_dma_mask(pdev,
11658                                                           persist_dma_mask);
11659                         if (err < 0) {
11660                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11661                                        "DMA for consistent allocations\n");
11662                                 goto err_out_iounmap;
11663                         }
11664                 }
11665         }
11666         if (err || dma_mask == DMA_32BIT_MASK) {
11667                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11668                 if (err) {
11669                         printk(KERN_ERR PFX "No usable DMA configuration, "
11670                                "aborting.\n");
11671                         goto err_out_iounmap;
11672                 }
11673         }
11674
11675         tg3_init_bufmgr_config(tp);
11676
11677 #if TG3_TSO_SUPPORT != 0
11678         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11679                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11680         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11681                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11682                    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11683                    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11684                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11685         } else {
11686                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11687         }
11689
11690         /* TSO is on by default on chips that support hardware TSO.
11691          * Firmware TSO on older chips gives lower performance, so it
11692          * is off by default, but can be enabled using ethtool.
11693          */
11694         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11695                 dev->features |= NETIF_F_TSO;
11696                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
11697                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
11698                         dev->features |= NETIF_F_TSO6;
11699         }
11700
11701 #endif
11702
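        /* On 5705 A1 parts without TSO running on a slow PCI bus, limit
         * the standard RX ring to at most 64 pending descriptors
         * (default 63).
         */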
11703         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11704             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11705             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11706                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11707                 tp->rx_pending = 63;
11708         }
11709
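        /* The 5704 and 5714 are dual-port devices; remember the peer
         * function so operations affecting both ports can be coordinated.
         */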
11710         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11711             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11712                 tp->pdev_peer = tg3_find_peer(tp);
11713
11714         err = tg3_get_device_address(tp);
11715         if (err) {
11716                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11717                        "aborting.\n");
11718                 goto err_out_iounmap;
11719         }
11720
11721         /*
11722          * Reset the chip in case the UNDI or EFI boot driver did not
11723          * shut it down.  Otherwise the DMA self test will enable WDMAC
11724          * and we'll see (spurious) pending DMA on the PCI bus.
11725          */
11726         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11727             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11728                 pci_save_state(tp->pdev);
11729                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11730                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11731         }
11732
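        /* Run the DMA engine self test; the resulting tp->dma_rwctrl
         * setting is reported in the probe message below.
         */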
11733         err = tg3_test_dma(tp);
11734         if (err) {
11735                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11736                 goto err_out_iounmap;
11737         }
11738
11739         /* Most Tigon3 chips can only checksum IPv4, and some have buggy
11740          * checksum hardware; the 5755 and 5787 can checksum any protocol.
11741          */
11742         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11743                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11744                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11745                         dev->features |= NETIF_F_HW_CSUM;
11746                 else
11747                         dev->features |= NETIF_F_IP_CSUM;
11748                 dev->features |= NETIF_F_SG;
11749                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11750         } else
11751                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11752
11753         /* flow control autonegotiation is default behavior */
11754         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11755
11756         tg3_init_coal(tp);
11757
11758         /* Now that we have fully set up the chip, save away a snapshot
11759          * of the PCI config space.  We need to restore this after
11760          * GRC_MISC_CFG core clock resets and some resume events.
11761          */
11762         pci_save_state(tp->pdev);
11763
11764         err = register_netdev(dev);
11765         if (err) {
11766                 printk(KERN_ERR PFX "Cannot register net device, "
11767                        "aborting.\n");
11768                 goto err_out_iounmap;
11769         }
11770
11771         pci_set_drvdata(pdev, dev);
11772
11773         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11774                dev->name,
11775                tp->board_part_number,
11776                tp->pci_chip_rev_id,
11777                tg3_phy_string(tp),
11778                tg3_bus_string(tp, str),
11779                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11780
11781         for (i = 0; i < 6; i++)
11782                 printk("%2.2x%c", dev->dev_addr[i],
11783                        i == 5 ? '\n' : ':');
11784
11785         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11786                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11787                "TSOcap[%d] \n",
11788                dev->name,
11789                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11790                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11791                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11792                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11793                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11794                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11795                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11796         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11797                dev->name, tp->dma_rwctrl,
11798                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11799                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11800
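        /* No link has been established yet; report carrier off until the
         * link state machinery brings the interface up.
         */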
11801         netif_carrier_off(tp->dev);
11802
11803         return 0;
11804
11805 err_out_iounmap:
11806         if (tp->regs) {
11807                 iounmap(tp->regs);
11808                 tp->regs = NULL;
11809         }
11810
11811 err_out_free_dev:
11812         free_netdev(dev);
11813
11814 err_out_free_res:
11815         pci_release_regions(pdev);
11816
11817 err_out_disable_pdev:
11818         pci_disable_device(pdev);
11819         pci_set_drvdata(pdev, NULL);
11820         return err;
11821 }
11822
11823 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11824 {
11825         struct net_device *dev = pci_get_drvdata(pdev);
11826
11827         if (dev) {
11828                 struct tg3 *tp = netdev_priv(dev);
11829
11830                 flush_scheduled_work();
11831                 unregister_netdev(dev);
11832                 if (tp->regs) {
11833                         iounmap(tp->regs);
11834                         tp->regs = NULL;
11835                 }
11836                 free_netdev(dev);
11837                 pci_release_regions(pdev);
11838                 pci_disable_device(pdev);
11839                 pci_set_drvdata(pdev, NULL);
11840         }
11841 }
11842
11843 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11844 {
11845         struct net_device *dev = pci_get_drvdata(pdev);
11846         struct tg3 *tp = netdev_priv(dev);
11847         int err;
11848
11849         if (!netif_running(dev))
11850                 return 0;
11851
11852         flush_scheduled_work();
11853         tg3_netif_stop(tp);
11854
11855         del_timer_sync(&tp->timer);
11856
11857         tg3_full_lock(tp, 1);
11858         tg3_disable_ints(tp);
11859         tg3_full_unlock(tp);
11860
11861         netif_device_detach(dev);
11862
11863         tg3_full_lock(tp, 0);
11864         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11865         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11866         tg3_full_unlock(tp);
11867
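        /* Enter the requested low-power state.  If that fails, restart
         * the hardware and reattach the interface so it remains usable.
         */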
11868         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11869         if (err) {
11870                 tg3_full_lock(tp, 0);
11871
11872                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11873                 if (tg3_restart_hw(tp, 1))
11874                         goto out;
11875
11876                 tp->timer.expires = jiffies + tp->timer_offset;
11877                 add_timer(&tp->timer);
11878
11879                 netif_device_attach(dev);
11880                 tg3_netif_start(tp);
11881
11882 out:
11883                 tg3_full_unlock(tp);
11884         }
11885
11886         return err;
11887 }
11888
11889 static int tg3_resume(struct pci_dev *pdev)
11890 {
11891         struct net_device *dev = pci_get_drvdata(pdev);
11892         struct tg3 *tp = netdev_priv(dev);
11893         int err;
11894
11895         if (!netif_running(dev))
11896                 return 0;
11897
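        /* Restore the saved PCI config space before waking the chip
         * back up.
         */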
11898         pci_restore_state(tp->pdev);
11899
11900         err = tg3_set_power_state(tp, PCI_D0);
11901         if (err)
11902                 return err;
11903
11904         netif_device_attach(dev);
11905
11906         tg3_full_lock(tp, 0);
11907
11908         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11909         err = tg3_restart_hw(tp, 1);
11910         if (err)
11911                 goto out;
11912
11913         tp->timer.expires = jiffies + tp->timer_offset;
11914         add_timer(&tp->timer);
11915
11916         tg3_netif_start(tp);
11917
11918 out:
11919         tg3_full_unlock(tp);
11920
11921         return err;
11922 }
11923
11924 static struct pci_driver tg3_driver = {
11925         .name           = DRV_MODULE_NAME,
11926         .id_table       = tg3_pci_tbl,
11927         .probe          = tg3_init_one,
11928         .remove         = __devexit_p(tg3_remove_one),
11929         .suspend        = tg3_suspend,
11930         .resume         = tg3_resume
11931 };
11932
11933 static int __init tg3_init(void)
11934 {
11935         return pci_register_driver(&tg3_driver);
11936 }
11937
11938 static void __exit tg3_cleanup(void)
11939 {
11940         pci_unregister_driver(&tg3_driver);
11941 }
11942
11943 module_init(tg3_init);
11944 module_exit(tg3_cleanup);