/* drivers/net/tg3.c — from the linux-2.6-omap-h63xx tree
 * (snapshot at commit "[TG3]: Add 5787F device ID.")
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.69"
#define DRV_MODULE_RELDATE      "November 15, 2006"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap: everything except verbose per-packet
 * categories; may be overridden with the tg3_debug module parameter.
 */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Advance a TX ring index with power-of-two wraparound. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* Receive buffer sizes: max frame payload plus the chip's RX offset
 * and 64 bytes of slop.
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
/* PCI IDs claimed by this driver: Broadcom Tigon3 family chips plus
 * SysKonnect, Altima and Apple parts built on the same silicon.
 * Terminated by an all-zero sentinel entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
216
/* Statistic names reported via ETHTOOL_GSTRINGS.  The array is sized
 * to TG3_NUM_STATS (one name per u64 of struct tg3_ethtool_stats);
 * entries are presumably in struct field order — verify against
 * struct tg3_ethtool_stats in tg3.h before reordering.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
297
/* Self-test names reported via ETHTOOL_GSTRINGS for ETHTOOL_TEST;
 * must have TG3_NUM_TEST entries, in the order the tests are run.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
308
309 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
310 {
311         writel(val, tp->regs + off);
312 }
313
314 static u32 tg3_read32(struct tg3 *tp, u32 off)
315 {
316         return (readl(tp->regs + off));
317 }
318
/* Write a chip register through the PCI config-space indirect window:
 * TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA carries
 * the value.  indirect_lock keeps the base/data config cycles of
 * concurrent users from interleaving.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
328
/* Register write followed by a read-back of the same register so the
 * posted write is flushed to the chip before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
334
/* Read a chip register through the PCI config-space indirect window
 * (counterpart of tg3_write_indirect_reg32); indirect_lock serializes
 * the base-select and data config cycles.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
346
/* Mailbox write for chips running in indirect mode.  The RX return
 * and RX standard producer mailboxes have dedicated PCI config-space
 * shadow registers and are written directly; all other mailboxes go
 * through the indirect register window at off + 0x5600 (the GRC
 * mailbox region — confirm against GRCMBOX_BASE in tg3.h).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
376
/* Mailbox read for indirect mode: fetch from the indirect register
 * window at off + 0x5600 under indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
388
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 *
 * In the posted path the delay is applied both before and after the
 * flushing read, so the full settle time is guaranteed either way.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
413
/* Mailbox write with read-back flush.  The flush read is performed
 * only when neither the write-reorder workaround nor the ICH
 * workaround is active — on those chips the read-back is skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
421
/* TX mailbox write with two hardware workarounds: chips flagged with
 * TXD_MBOX_HWBUG need the value written twice, and chips flagged with
 * MBOX_WRITE_REORDER need a read-back to flush the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
431
432 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
433 {
434         return (readl(tp->regs + off + GRCMBOX_BASE));
435 }
436
437 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
438 {
439         writel(val, tp->regs + off + GRCMBOX_BASE);
440 }
441
/* Register/mailbox access shorthands.  All I/O is routed through the
 * method pointers in struct tg3 so that the direct, indirect, or
 * 5906-specific implementation selected at probe time is used.  The
 * macros expect a local "tp" in scope.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
452
/* Write a word into NIC on-chip SRAM through the memory window.
 * On 5906, writes to the statistics block region are silently dropped.
 * SRAM_USE_CONFIG selects the PCI config-space shadow registers over
 * the MMIO window; either way the window base is restored to zero
 * before releasing indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
477
/* Read a word from NIC on-chip SRAM through the memory window
 * (counterpart of tg3_write_mem).  On 5906 the statistics block
 * region is unreadable and *val is forced to zero instead.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
504
/* Disable chip interrupts: mask PCI INTA assertion in the misc host
 * control register, then write 1 to the interrupt mailbox (flushed).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
511
/* Conditionally force an interrupt: if tagged status is not in use and
 * the status block already holds an unprocessed update, assert SETINT
 * via GRC local control; otherwise kick the host coalescing engine
 * (HOSTCC_MODE_NOW) so any pending work is delivered.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
521
/* Re-enable chip interrupts.  irq_sync is cleared first (with a write
 * barrier so the flag is visible before the unmask), the PCI INT mask
 * is dropped, and the last processed tag is written to the interrupt
 * mailbox.  1SHOT_MSI chips get the mailbox write a second time;
 * finally tg3_cond_int() fires an interrupt if work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
536
537 static inline unsigned int tg3_has_work(struct tg3 *tp)
538 {
539         struct tg3_hw_status *sblk = tp->hw_status;
540         unsigned int work_exists = 0;
541
542         /* check for phy events */
543         if (!(tp->tg3_flags &
544               (TG3_FLAG_USE_LINKCHG_REG |
545                TG3_FLAG_POLL_SERDES))) {
546                 if (sblk->status & SD_STATUS_LINK_CHG)
547                         work_exists = 1;
548         }
549         /* check for RX/TX work to do */
550         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
551             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
552                 work_exists = 1;
553
554         return work_exists;
555 }
556
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        /* Order the mailbox write against any following MMIO. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
577
/* Quiesce the interface: refresh trans_start so the TX watchdog does
 * not fire while the queue is deliberately stopped, then disable NAPI
 * polling and the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}
584
/* Restart the interface after tg3_netif_stop: wake the TX queue,
 * re-enable polling, mark the status block updated so the next poll
 * runs, and re-enable interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
596
/* Switch the chip's core clock back to its base configuration.
 * 5780-class chips are left alone.  The preserved bits (FORCE_CLKRUN,
 * CLKRUN_OENABLE, low 5 bits) are cached in tp->pci_clock_ctrl.
 * When stepping down from a faster core clock, intermediate values are
 * written with 40us settle time each — this multi-step order matters,
 * per the usec_wait rationale above _tw32_flush().
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
627
/* Poll budget for MI (MDIO) transactions. */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg over the MI communication interface into
 * *val.  Autopolling is temporarily disabled around the transaction
 * (and restored afterwards) so it cannot collide with our MI_COM use.
 * Returns 0 on success or -EBUSY if the MI interface never went idle;
 * *val is zeroed up front so a timeout yields 0, not garbage.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Assemble the MI frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to pick up data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
678
/* Write @val to PHY register @reg over the MI interface.  On 5906,
 * writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently skipped
 * (reported as success).  Autopolling is suspended for the duration,
 * as in tg3_readphy().  Returns 0 on success, -EBUSY on MI timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Assemble the MI frame: PHY address, register, data, write cmd. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
727
/* Enable the PHY's ethernet@wirespeed feature via a read-modify-write
 * of the auxiliary control register (0x7007 selects it for reading,
 * then bits 15 and 4 are set — presumably the wirespeed enable bits;
 * confirm against the Broadcom PHY register reference).  No-op when
 * the board flags the feature as unsupported.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}
740
741 static int tg3_bmcr_reset(struct tg3 *tp)
742 {
743         u32 phy_control;
744         int limit, err;
745
746         /* OK, reset it, and poll the BMCR_RESET bit until it
747          * clears or we time out.
748          */
749         phy_control = BMCR_RESET;
750         err = tg3_writephy(tp, MII_BMCR, phy_control);
751         if (err != 0)
752                 return -EBUSY;
753
754         limit = 5000;
755         while (limit--) {
756                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
757                 if (err != 0)
758                         return -EBUSY;
759
760                 if ((phy_control & BMCR_RESET) == 0) {
761                         udelay(40);
762                         break;
763                 }
764                 udelay(10);
765         }
766         if (limit <= 0)
767                 return -EBUSY;
768
769         return 0;
770 }
771
772 static int tg3_wait_macro_done(struct tg3 *tp)
773 {
774         int limit = 100;
775
776         while (limit--) {
777                 u32 tmp32;
778
779                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
780                         if ((tmp32 & 0x1000) == 0)
781                                 break;
782                 }
783         }
784         if (limit <= 0)
785                 return -EBUSY;
786
787         return 0;
788 }
789
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify.  For each channel: load six pattern
 * words through the DSP read/write port, trigger the macro (reg 0x16
 * command words 0x0202 / 0x0082 / 0x0802 — exact semantics are
 * Broadcom DSP internals; confirm against PHY documentation), then
 * read back three low/high word pairs and compare against the
 * pattern (low masked to 15 bits, high to 4 bits).
 *
 * On a macro timeout, *resetp is set so the caller knows the PHY
 * needs another reset.  On a data mismatch, a recovery sequence is
 * written to DSP address 0x000b before returning.  Returns 0 when
 * all channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
855
856 static int tg3_phy_reset_chanpat(struct tg3 *tp)
857 {
858         int chan;
859
860         for (chan = 0; chan < 4; chan++) {
861                 int i;
862
863                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
864                              (chan * 0x2000) | 0x0200);
865                 tg3_writephy(tp, 0x16, 0x0002);
866                 for (i = 0; i < 6; i++)
867                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
868                 tg3_writephy(tp, 0x16, 0x0202);
869                 if (tg3_wait_macro_done(tp))
870                         return -EBUSY;
871         }
872
873         return 0;
874 }
875
/* DSP workaround sequence for 5703/5704/5705 PHYs: reset the PHY,
 * write and verify the DSP channel test patterns (retrying up to 10
 * times, re-resetting the PHY whenever a macro timeout is reported),
 * then clear the patterns and restore transmitter/interrupt and
 * master-mode state.
 *
 * Called from tg3_phy_reset().  Returns 0 on success or a negative
 * errno.
 *
 * NOTE(review): if every loop iteration bails out via "continue",
 * reg32 and phy9_orig are read uninitialized in the restore path
 * below, and err still holds the last testpat result when
 * tg3_phy_reset_chanpat() overwrites it -- worth confirming the retry
 * budget makes this practically unreachable.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			/* Only re-reset when the testpat helper asks. */
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode, saving the original setting so it
		 * can be restored after the workaround.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test patterns regardless of how the loop ended. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access (mirrors the block above). */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the master-mode setting saved before the workaround. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
951
952 static void tg3_link_report(struct tg3 *);
953
/* Reset the tigon3 PHY.  5703/5704/5705 chips get the full DSP
 * test-pattern workaround sequence (tg3_phy_reset_5703_4_5); all
 * others get a plain BMCR reset.  Afterwards, chip-specific PHY bug
 * fixups, jumbo-frame settings and wirespeed are (re)applied.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is read twice; the second read reflects current state
	 * (the MII link-status bit is latched-low).
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; report it going down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* DSP fixups for known PHY bugs, keyed off probe-time flags. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 phy_reg;

		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
			u32 phy_reg2;

			/* Open the shadow register window, flip the
			 * auto-MDIX bit, then restore the original
			 * test-register value.
			 */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     phy_reg | MII_TG3_EPHY_SHADOW_EN);
			/* Enable auto-MDIX */
			if (!tg3_readphy(tp, 0x10, &phy_reg2))
				tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
			tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
		}
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1060
/* Drive the GRC local-control GPIOs that switch auxiliary power.  On
 * dual-port devices (5704/5714) the GPIOs are shared between ports,
 * so the peer's state is consulted and only one port performs the
 * switching.
 *
 * If WOL or ASF is enabled on either port, the sequence keeps aux
 * power available; otherwise the GPIOs are walked through a
 * power-drop sequence.  No-op when EEPROM write-protect is set.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* The peer port finished init and owns the
			 * shared GPIOs; leave them to it.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step sequence with 100us settle after
			 * each write.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* Again, defer to a peer that completed init. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1156
1157 static int tg3_setup_phy(struct tg3 *, int);
1158
1159 #define RESET_KIND_SHUTDOWN     0
1160 #define RESET_KIND_INIT         1
1161 #define RESET_KIND_SUSPEND      2
1162
1163 static void tg3_write_sig_post_reset(struct tg3 *, int);
1164 static int tg3_halt_cpu(struct tg3 *, u32);
1165 static int tg3_nvram_lock(struct tg3 *);
1166 static void tg3_nvram_unlock(struct tg3 *);
1167
1168 static void tg3_power_down_phy(struct tg3 *tp)
1169 {
1170         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1171                 return;
1172
1173         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
1174                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1175                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1176                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1177         }
1178
1179         /* The PHY should not be powered down on some chips because
1180          * of bugs.
1181          */
1182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1183             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1184             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1185              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1186                 return;
1187         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1188 }
1189
/* Transition the device to the given PCI power state (D0-D3hot).
 *
 * For D0 the PM control register is written and the function returns
 * early.  For the low-power states it masks PCI interrupts, saves the
 * link config and drops copper links to 10/half, arms WOL in the MAC
 * when enabled, gates core clocks per chip family, powers the PHY
 * down when neither WOL nor ASF needs it, switches the aux-power
 * GPIOs, and finally writes the new PM state to config space.
 *
 * Returns 0 on success, -EINVAL for an unsupported state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of the PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Read-modify-write PM control: clear PME status (write-1-to-
	 * clear) and the old state bits.
	 */
	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is in low power. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link config so resume can restore it. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper: renegotiate down to 10/half to save power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll (up to ~200ms) for the firmware mailbox to show
		 * the expected magic complement before proceeding.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Post the shutdown/WOL state to the firmware mailbox. */
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection when D3cold PME is
		 * supported and WOL was requested.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate core clocks; which bits apply depends on chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write with a 40us settle after each step. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down when nothing needs the link. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1401
1402 static void tg3_link_report(struct tg3 *tp)
1403 {
1404         if (!netif_carrier_ok(tp->dev)) {
1405                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1406         } else {
1407                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1408                        tp->dev->name,
1409                        (tp->link_config.active_speed == SPEED_1000 ?
1410                         1000 :
1411                         (tp->link_config.active_speed == SPEED_100 ?
1412                          100 : 10)),
1413                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1414                         "full" : "half"));
1415
1416                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1417                        "%s for RX.\n",
1418                        tp->dev->name,
1419                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1420                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1421         }
1422 }
1423
/* Resolve TX/RX flow control from the local and remote autoneg
 * advertisements and program the MAC RX/TX mode registers to match.
 *
 * local_adv/remote_adv are MII advertisement-register values; on
 * MII-SERDES links they arrive as 1000BaseX pause bits and are
 * converted to the 1000BaseT form first.  When PAUSE_AUTONEG is off,
 * the previously configured TG3_FLAG_{RX,TX}_PAUSE flags are applied
 * unchanged.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Pause resolution from the local/remote CAP and ASYM
		 * advertisement bits.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		new_tg3_flags = tp->tg3_flags;
	}

	/* Push the result into the MAC, touching hardware only when a
	 * mode register actually changed.
	 */
	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1495
1496 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1497 {
1498         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1499         case MII_TG3_AUX_STAT_10HALF:
1500                 *speed = SPEED_10;
1501                 *duplex = DUPLEX_HALF;
1502                 break;
1503
1504         case MII_TG3_AUX_STAT_10FULL:
1505                 *speed = SPEED_10;
1506                 *duplex = DUPLEX_FULL;
1507                 break;
1508
1509         case MII_TG3_AUX_STAT_100HALF:
1510                 *speed = SPEED_100;
1511                 *duplex = DUPLEX_HALF;
1512                 break;
1513
1514         case MII_TG3_AUX_STAT_100FULL:
1515                 *speed = SPEED_100;
1516                 *duplex = DUPLEX_FULL;
1517                 break;
1518
1519         case MII_TG3_AUX_STAT_1000HALF:
1520                 *speed = SPEED_1000;
1521                 *duplex = DUPLEX_HALF;
1522                 break;
1523
1524         case MII_TG3_AUX_STAT_1000FULL:
1525                 *speed = SPEED_1000;
1526                 *duplex = DUPLEX_FULL;
1527                 break;
1528
1529         default:
1530                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1531                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1532                                  SPEED_10;
1533                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1534                                   DUPLEX_HALF;
1535                         break;
1536                 }
1537                 *speed = SPEED_INVALID;
1538                 *duplex = DUPLEX_INVALID;
1539                 break;
1540         };
1541 }
1542
/* Program the copper PHY's advertisement registers and (re)start link
 * negotiation according to tp->link_config.  Three configurations are
 * handled: low-power (WOL) mode, full autonegotiation with the
 * configured advertisement mask, and a single forced speed/duplex.
 * Caller must hold the appropriate lock for PHY access.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* 100Mb advertisement is re-enabled only if WOL needs it. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything the
		 * hardware supports.
		 */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		/* Translate the ethtool ADVERTISED_* mask into MII
		 * ADVERTISE_* bits for the 10/100 register.
		 */
		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 parts negotiate gigabit as master only
			 * (chip erratum workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode workaround as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		/* Forced mode: write BMCR directly instead of restarting
		 * autonegotiation.
		 */
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait (up to
			 * ~15ms) for link-status to drop before applying the
			 * new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice because the link bit is
				 * latched; the second read gives current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: kick off (re)negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1681
1682 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1683 {
1684         int err;
1685
1686         /* Turn off tap power management. */
1687         /* Set Extended packet length bit */
1688         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1689
1690         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1691         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1692
1693         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1694         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1695
1696         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1697         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1698
1699         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1700         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1701
1702         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1703         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1704
1705         udelay(40);
1706
1707         return err;
1708 }
1709
1710 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1711 {
1712         u32 adv_reg, all_mask;
1713
1714         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1715                 return 0;
1716
1717         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1718                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1719         if ((adv_reg & all_mask) != all_mask)
1720                 return 0;
1721         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1722                 u32 tg3_ctrl;
1723
1724                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1725                         return 0;
1726
1727                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1728                             MII_TG3_CTRL_ADV_1000_FULL);
1729                 if ((tg3_ctrl & all_mask) != all_mask)
1730                         return 0;
1731         }
1732         return 1;
1733 }
1734
/* Bring up / re-check the copper PHY link: clears pending MAC events,
 * applies per-chip PHY workarounds, determines current link state
 * (speed/duplex/flow-control), reprograms the MAC mode registers to
 * match, and reports carrier changes to the network stack.  Always
 * returns 0 except when the 5401 DSP init fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge any stale link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: (re)load the 5401 DSP fixups and
			 * poll up to ~10ms for link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* B0 revision at gigabit may need a full PHY reset
			 * plus another DSP load to recover.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Select the LED mode matching the board configuration. */
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Make sure bit 10 of AUX_CTRL shadow 0x4007 is set;
		 * if we had to set it, skip straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 tries) for the latched BMSR to show link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for AUX_STAT to become non-zero, then decode the
		 * negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR until it returns a stable, sane value
		 * (0x7fff indicates a bad read).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY matches
			 * the requested speed/duplex with autoneg off.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisement / restart negotiation, then take
		 * one more look at link status.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Mirror the negotiated link parameters into the MAC mode register. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: re-ack status and
	 * notify firmware via the mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate any carrier change to the stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2013
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	/* Current state machine state (ANEG_STATE_*). */
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	/* MR_* control/status flags (modeled on the 802.3 clause 37
	 * management register bits).
	 */
	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters used for settle-time measurement (incremented
	 * per state-machine invocation, not wall clock).
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * matched (ability_match set once it is stable).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Transmitted / received autoneg config words (ANEG_CFG_*). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must persist before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
2077
/* Run one tick of the software 1000BASE-X autonegotiation state
 * machine (modeled on 802.3 clause 37).  Samples the received config
 * word from the MAC, advances ap->state, and programs MAC_TX_AUTO_NEG /
 * MAC_MODE as needed.  Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB
 * (caller should keep ticking), or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: zero all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match is set only after the same non-idle
		 * config word has been seen more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* Receiving idles: reset the match tracking. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Restart negotiation from scratch. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send all-zero config words to force the partner to
		 * restart as well.
		 */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise our abilities: full duplex, symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait until a stable non-zero config word is received. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner acked; make sure the word (ignoring the
			 * ACK bit) still matches, else start over.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject config words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the link partner's advertised abilities into
		 * the MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		/* Partner went back to ability detect: start over. */
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * only proceed if neither side needs it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; transmit idles. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2325
/* Run 1000BASE-X autonegotiation in software by cranking the
 * tg3_fiber_aneg_smachine() state machine in a busy-wait loop.
 *
 * @tp:    device private state
 * @flags: out parameter; receives the MR_* flags accumulated by the
 *         state machine (link partner abilities, completion status)
 *
 * Returns 1 when the state machine finished (ANEG_DONE) and any of
 * MR_AN_COMPLETE / MR_LINK_OK / MR_LP_ADV_FULL_DUPLEX is set in the
 * resulting flags; 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
        int res = 0;
        struct tg3_fiber_aneginfo aninfo;
        int status = ANEG_FAILED;
        unsigned int tick;
        u32 tmp;

        /* Clear any advertisement before (re)starting autoneg. */
        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Force GMII port mode, then turn on config code word TX. */
        tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
        tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
        udelay(40);

        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
        udelay(40);

        memset(&aninfo, 0, sizeof(aninfo));
        aninfo.flags |= MR_AN_ENABLE;
        aninfo.state = ANEG_STATE_UNKNOWN;
        aninfo.cur_time = 0;
        tick = 0;
        /* Step the state machine for at most ~195ms (195000 x 1us). */
        while (++tick < 195000) {
                status = tg3_fiber_aneg_smachine(tp, &aninfo);
                if (status == ANEG_DONE || status == ANEG_FAILED)
                        break;

                udelay(1);
        }

        /* Stop sending config code words. */
        tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        *flags = aninfo.flags;

        if (status == ANEG_DONE &&
            (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
                             MR_LP_ADV_FULL_DUPLEX)))
                res = 1;

        return res;
}
2369
/* Initialization sequence for the BCM8002 fiber PHY: PLL lock range,
 * soft reset, POR pulse, and channel register selection.  The register
 * numbers and values are opaque BCM8002-specific magic — presumably
 * taken from a vendor init sequence; no public datasheet reference in
 * this file.  Ordering and delays are significant.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2419
2420 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2421 {
2422         u32 sg_dig_ctrl, sg_dig_status;
2423         u32 serdes_cfg, expected_sg_dig_ctrl;
2424         int workaround, port_a;
2425         int current_link_up;
2426
2427         serdes_cfg = 0;
2428         expected_sg_dig_ctrl = 0;
2429         workaround = 0;
2430         port_a = 1;
2431         current_link_up = 0;
2432
2433         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2434             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2435                 workaround = 1;
2436                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2437                         port_a = 0;
2438
2439                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2440                 /* preserve bits 20-23 for voltage regulator */
2441                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2442         }
2443
2444         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2445
2446         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2447                 if (sg_dig_ctrl & (1 << 31)) {
2448                         if (workaround) {
2449                                 u32 val = serdes_cfg;
2450
2451                                 if (port_a)
2452                                         val |= 0xc010000;
2453                                 else
2454                                         val |= 0x4010000;
2455                                 tw32_f(MAC_SERDES_CFG, val);
2456                         }
2457                         tw32_f(SG_DIG_CTRL, 0x01388400);
2458                 }
2459                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2460                         tg3_setup_flow_control(tp, 0, 0);
2461                         current_link_up = 1;
2462                 }
2463                 goto out;
2464         }
2465
2466         /* Want auto-negotiation.  */
2467         expected_sg_dig_ctrl = 0x81388400;
2468
2469         /* Pause capability */
2470         expected_sg_dig_ctrl |= (1 << 11);
2471
2472         /* Asymettric pause */
2473         expected_sg_dig_ctrl |= (1 << 12);
2474
2475         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2476                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2477                     tp->serdes_counter &&
2478                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2479                                     MAC_STATUS_RCVD_CFG)) ==
2480                      MAC_STATUS_PCS_SYNCED)) {
2481                         tp->serdes_counter--;
2482                         current_link_up = 1;
2483                         goto out;
2484                 }
2485 restart_autoneg:
2486                 if (workaround)
2487                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2488                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2489                 udelay(5);
2490                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2491
2492                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2493                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2494         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2495                                  MAC_STATUS_SIGNAL_DET)) {
2496                 sg_dig_status = tr32(SG_DIG_STATUS);
2497                 mac_status = tr32(MAC_STATUS);
2498
2499                 if ((sg_dig_status & (1 << 1)) &&
2500                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2501                         u32 local_adv, remote_adv;
2502
2503                         local_adv = ADVERTISE_PAUSE_CAP;
2504                         remote_adv = 0;
2505                         if (sg_dig_status & (1 << 19))
2506                                 remote_adv |= LPA_PAUSE_CAP;
2507                         if (sg_dig_status & (1 << 20))
2508                                 remote_adv |= LPA_PAUSE_ASYM;
2509
2510                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2511                         current_link_up = 1;
2512                         tp->serdes_counter = 0;
2513                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2514                 } else if (!(sg_dig_status & (1 << 1))) {
2515                         if (tp->serdes_counter)
2516                                 tp->serdes_counter--;
2517                         else {
2518                                 if (workaround) {
2519                                         u32 val = serdes_cfg;
2520
2521                                         if (port_a)
2522                                                 val |= 0xc010000;
2523                                         else
2524                                                 val |= 0x4010000;
2525
2526                                         tw32_f(MAC_SERDES_CFG, val);
2527                                 }
2528
2529                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2530                                 udelay(40);
2531
2532                                 /* Link parallel detection - link is up */
2533                                 /* only if we have PCS_SYNC and not */
2534                                 /* receiving config code words */
2535                                 mac_status = tr32(MAC_STATUS);
2536                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2537                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2538                                         tg3_setup_flow_control(tp, 0, 0);
2539                                         current_link_up = 1;
2540                                         tp->tg3_flags2 |=
2541                                                 TG3_FLG2_PARALLEL_DETECT;
2542                                         tp->serdes_counter =
2543                                                 SERDES_PARALLEL_DET_TIMEOUT;
2544                                 } else
2545                                         goto restart_autoneg;
2546                         }
2547                 }
2548         } else {
2549                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2550                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2551         }
2552
2553 out:
2554         return current_link_up;
2555 }
2556
/* Link setup for SERDES parts without hardware autoneg: either run the
 * software autoneg state machine via fiber_autoneg(), or force a
 * 1000-full link when autoneg is disabled.
 *
 * @tp:         device private state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* No PCS sync means no usable signal — link stays down. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
                tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
                goto out;
        }

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 flags;
                int i;

                if (fiber_autoneg(tp, &flags)) {
                        u32 local_adv, remote_adv;

                        /* Translate the MR_* result flags into MII-style
                         * pause bits for tg3_setup_flow_control().
                         */
                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (flags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_PAUSE_CAP;
                        if (flags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
                        current_link_up = 1;
                }
                /* Ack sync/config change bits until they stay clear,
                 * bounded to 30 iterations.
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Autoneg failed, but PCS is synced and the partner sends
                 * no config code words: treat the link as up anyway.
                 */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                /* Forcing 1000FD link up. */
                current_link_up = 1;
                tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);
        }

out:
        return current_link_up;
}
2614
/* Top-level link setup for TBI/fiber (PHY_SERDES) interfaces.
 * Dispatches to hardware or by-hand autoneg, updates carrier state,
 * LEDs, and reports link changes.  force_reset is accepted for
 * signature parity with the other setup routines but is not used
 * here.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Snapshot current link parameters so we only report a change
         * at the bottom when something actually moved.
         */
        orig_pause_cfg =
                (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
                                  TG3_FLAG_TX_PAUSE));
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: no HW autoneg, carrier already up, init complete,
         * and the MAC shows a clean synced link with no pending config
         * change — just ack the change bits and return.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Switch the MAC to TBI port mode. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Mark the status block updated without touching other bits. */
        tp->hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack sync/config change bits until they stay clear (bounded). */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        /* Pulse SEND_CONFIGS to prod the link partner. */
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        if (current_link_up == 1) {
                /* Fiber link is always reported as 1000/full here. */
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* Report on a carrier transition, or on a pause/speed/duplex
         * change while carrier state stayed the same.
         */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                u32 now_pause_cfg =
                        tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
                                         TG3_FLAG_TX_PAUSE);
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
2730
/* Link setup for MII-attached SERDES parts (e.g. 5714S/5780 class),
 * where the serdes is driven through standard MII registers using the
 * 1000BASE-X (ADVERTISE_1000X*) advertisement bits.
 *
 * @tp:          device private state
 * @force_reset: when non-zero, reset the PHY before configuring
 *
 * Returns the OR of the tg3_readphy() error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up, err = 0;
        u32 bmsr, bmcr;
        u16 current_speed;
        u8 current_duplex;

        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32(MAC_EVENT, 0);

        /* Ack any stale status change bits. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if (force_reset)
                tg3_phy_reset(tp);

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        /* BMSR link status is latched-low; read twice for the current
         * value.  On 5714, trust MAC_TX_STATUS over the PHY's bit.
         */
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        err |= tg3_readphy(tp, MII_BMCR, &bmcr);

        if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
            (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 adv, new_adv;

                /* Rebuild the 1000BASE-X advertisement from scratch. */
                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
                                  ADVERTISE_1000XPAUSE |
                                  ADVERTISE_1000XPSE_ASYM |
                                  ADVERTISE_SLCT);

                /* Always advertise symmetric PAUSE just like copper */
                new_adv |= ADVERTISE_1000XPAUSE;

                if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                        new_adv |= ADVERTISE_1000XHALF;
                if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                        new_adv |= ADVERTISE_1000XFULL;

                /* Advertisement changed or autoneg was off: (re)start
                 * autoneg, arm the timeout, and return early — the link
                 * check will happen on a later call.
                 */
                if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);

                        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
                        tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

                        return err;
                }
        } else {
                /* Forced mode: only duplex is configurable (speed is
                 * fixed at 1000 on these parts).
                 */
                u32 new_bmcr;

                bmcr &= ~BMCR_SPEED1000;
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

                if (tp->link_config.duplex == DUPLEX_FULL)
                        new_bmcr |= BMCR_FULLDPLX;

                if (new_bmcr != bmcr) {
                        /* BMCR_SPEED1000 is a reserved bit that needs
                         * to be set on write.
                         */
                        new_bmcr |= BMCR_SPEED1000;

                        /* Force a linkdown */
                        if (netif_carrier_ok(tp->dev)) {
                                u32 adv;

                                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                                adv &= ~(ADVERTISE_1000XFULL |
                                         ADVERTISE_1000XHALF |
                                         ADVERTISE_SLCT);
                                tg3_writephy(tp, MII_ADVERTISE, adv);
                                tg3_writephy(tp, MII_BMCR, bmcr |
                                                           BMCR_ANRESTART |
                                                           BMCR_ANENABLE);
                                udelay(10);
                                netif_carrier_off(tp->dev);
                        }
                        tg3_writephy(tp, MII_BMCR, new_bmcr);
                        bmcr = new_bmcr;
                        /* Latched-low: read BMSR twice (5714 override as
                         * above).
                         */
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                        bmsr |= BMSR_LSTATUS;
                                else
                                        bmsr &= ~BMSR_LSTATUS;
                        }
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                }
        }

        if (bmsr & BMSR_LSTATUS) {
                current_speed = SPEED_1000;
                current_link_up = 1;
                if (bmcr & BMCR_FULLDPLX)
                        current_duplex = DUPLEX_FULL;
                else
                        current_duplex = DUPLEX_HALF;

                if (bmcr & BMCR_ANENABLE) {
                        u32 local_adv, remote_adv, common;

                        /* Resolve duplex/flow-control from the common
                         * subset of our and the partner's abilities.
                         */
                        err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
                        err |= tg3_readphy(tp, MII_LPA, &remote_adv);
                        common = local_adv & remote_adv;
                        if (common & (ADVERTISE_1000XHALF |
                                      ADVERTISE_1000XFULL)) {
                                if (common & ADVERTISE_1000XFULL)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;

                                tg3_setup_flow_control(tp, local_adv,
                                                       remote_adv);
                        }
                        else
                                current_link_up = 0;
                }
        }

        /* NOTE(review): this tests the *previous* active_duplex —
         * current_duplex is only committed below — so MAC_MODE may lag
         * the new duplex by one invocation.  Confirm whether that is
         * intentional before changing.
         */
        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

        tp->link_config.active_speed = current_speed;
        tp->link_config.active_duplex = current_duplex;

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else {
                        netif_carrier_off(tp->dev);
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                }
                tg3_link_report(tp);
        }
        return err;
}
2897
2898 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2899 {
2900         if (tp->serdes_counter) {
2901                 /* Give autoneg time to complete. */
2902                 tp->serdes_counter--;
2903                 return;
2904         }
2905         if (!netif_carrier_ok(tp->dev) &&
2906             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2907                 u32 bmcr;
2908
2909                 tg3_readphy(tp, MII_BMCR, &bmcr);
2910                 if (bmcr & BMCR_ANENABLE) {
2911                         u32 phy1, phy2;
2912
2913                         /* Select shadow register 0x1f */
2914                         tg3_writephy(tp, 0x1c, 0x7c00);
2915                         tg3_readphy(tp, 0x1c, &phy1);
2916
2917                         /* Select expansion interrupt status register */
2918                         tg3_writephy(tp, 0x17, 0x0f01);
2919                         tg3_readphy(tp, 0x15, &phy2);
2920                         tg3_readphy(tp, 0x15, &phy2);
2921
2922                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2923                                 /* We have signal detect and not receiving
2924                                  * config code words, link is up by parallel
2925                                  * detection.
2926                                  */
2927
2928                                 bmcr &= ~BMCR_ANENABLE;
2929                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2930                                 tg3_writephy(tp, MII_BMCR, bmcr);
2931                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2932                         }
2933                 }
2934         }
2935         else if (netif_carrier_ok(tp->dev) &&
2936                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2937                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2938                 u32 phy2;
2939
2940                 /* Select expansion interrupt status register */
2941                 tg3_writephy(tp, 0x17, 0x0f01);
2942                 tg3_readphy(tp, 0x15, &phy2);
2943                 if (phy2 & 0x20) {
2944                         u32 bmcr;
2945
2946                         /* Config code words received, turn on autoneg. */
2947                         tg3_readphy(tp, MII_BMCR, &bmcr);
2948                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2949
2950                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2951
2952                 }
2953         }
2954 }
2955
2956 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2957 {
2958         int err;
2959
2960         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2961                 err = tg3_setup_fiber_phy(tp, force_reset);
2962         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2963                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2964         } else {
2965                 err = tg3_setup_copper_phy(tp, force_reset);
2966         }
2967
2968         if (tp->link_config.active_speed == SPEED_1000 &&
2969             tp->link_config.active_duplex == DUPLEX_HALF)
2970                 tw32(MAC_TX_LENGTHS,
2971                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2972                       (6 << TX_LENGTHS_IPG_SHIFT) |
2973                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2974         else
2975                 tw32(MAC_TX_LENGTHS,
2976                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2977                       (6 << TX_LENGTHS_IPG_SHIFT) |
2978                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2979
2980         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2981                 if (netif_carrier_ok(tp->dev)) {
2982                         tw32(HOSTCC_STAT_COAL_TICKS,
2983                              tp->coal.stats_block_coalesce_usecs);
2984                 } else {
2985                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2986                 }
2987         }
2988
2989         return err;
2990 }
2991
2992 /* This is called whenever we suspect that the system chipset is re-
2993  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2994  * is bogus tx completions. We try to recover by setting the
2995  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2996  * in the workqueue.
2997  */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* Recovery only makes sense if the reorder workaround is not
         * already active and we are not using indirect mailbox writes.
         */
        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
               "mapped I/O cycles to the network device, attempting to "
               "recover. Please report the problem to the driver maintainer "
               "and include system chipset information.\n", tp->dev->name);

        /* Only flag the condition here; the actual chip reset happens
         * later from the workqueue.
         */
        spin_lock(&tp->lock);
        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(&tp->lock);
}
3012
3013 static inline u32 tg3_tx_avail(struct tg3 *tp)
3014 {
3015         smp_mb();
3016         return (tp->tx_pending -
3017                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3018 }
3019
3020 /* Tigon3 never reports partial packet sends.  So we do not
3021  * need special logic to handle SKBs that have not had all
3022  * of their frags sent yet, like SunGEM does.
3023  */
static void tg3_tx(struct tg3 *tp)
{
        /* Hardware's latest consumer index vs. our software index. */
        u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tp->tx_cons;

        /* Reap every descriptor the hardware has completed. */
        while (sw_idx != hw_idx) {
                struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A NULL skb means the hardware reported a bogus
                 * completion (likely MMIO reordering) — recover.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                /* Unmap the linear (head) portion of the skb. */
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);

                ri->skb = NULL;

                sw_idx = NEXT_TX(sw_idx);

                /* Each fragment occupies its own descriptor; unmap them
                 * in order.  An occupied slot or running past hw_idx
                 * indicates the same bogus-completion condition.
                 */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tp->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;

                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(ri, mapping),
                                       skb_shinfo(skb)->frags[i].size,
                                       PCI_DMA_TODEVICE);

                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tp->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Re-check availability under the tx lock to avoid waking the
         * queue racily against a concurrent tg3_start_xmit().
         */
        if (unlikely(netif_queue_stopped(tp->dev) &&
                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
                netif_tx_lock(tp->dev);
                if (netif_queue_stopped(tp->dev) &&
                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
                        netif_wake_queue(tp->dev);
                netif_tx_unlock(tp->dev);
        }
}
3087
3088 /* Returns size of skb allocated or < 0 on error.
3089  *
3090  * We only need to fill in the address because the other members
3091  * of the RX descriptor are invariant, see tg3_init_rings.
3092  *
3093  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3094  * posting buffers we only dirty the first cache line of the RX
3095  * descriptor (containing the address).  Whereas for the RX status
3096  * buffers the cpu only reads the last cacheline of the RX descriptor
3097  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3098  */
3099 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3100                             int src_idx, u32 dest_idx_unmasked)
3101 {
3102         struct tg3_rx_buffer_desc *desc;
3103         struct ring_info *map, *src_map;
3104         struct sk_buff *skb;
3105         dma_addr_t mapping;
3106         int skb_size, dest_idx;
3107
3108         src_map = NULL;
3109         switch (opaque_key) {
3110         case RXD_OPAQUE_RING_STD:
3111                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3112                 desc = &tp->rx_std[dest_idx];
3113                 map = &tp->rx_std_buffers[dest_idx];
3114                 if (src_idx >= 0)
3115                         src_map = &tp->rx_std_buffers[src_idx];
3116                 skb_size = tp->rx_pkt_buf_sz;
3117                 break;
3118
3119         case RXD_OPAQUE_RING_JUMBO:
3120                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3121                 desc = &tp->rx_jumbo[dest_idx];
3122                 map = &tp->rx_jumbo_buffers[dest_idx];
3123                 if (src_idx >= 0)
3124                         src_map = &tp->rx_jumbo_buffers[src_idx];
3125                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3126                 break;
3127
3128         default:
3129                 return -EINVAL;
3130         };
3131
3132         /* Do not overwrite any of the map or rp information
3133          * until we are sure we can commit to a new buffer.
3134          *
3135          * Callers depend upon this behavior and assume that
3136          * we leave everything unchanged if we fail.
3137          */
3138         skb = netdev_alloc_skb(tp->dev, skb_size);
3139         if (skb == NULL)
3140                 return -ENOMEM;
3141
3142         skb_reserve(skb, tp->rx_offset);
3143
3144         mapping = pci_map_single(tp->pdev, skb->data,
3145                                  skb_size - tp->rx_offset,
3146                                  PCI_DMA_FROMDEVICE);
3147
3148         map->skb = skb;
3149         pci_unmap_addr_set(map, mapping, mapping);
3150
3151         if (src_map != NULL)
3152                 src_map->skb = NULL;
3153
3154         desc->addr_hi = ((u64)mapping >> 32);
3155         desc->addr_lo = ((u64)mapping & 0xffffffff);
3156
3157         return skb_size;
3158 }
3159
3160 /* We only need to move over in the address because the other
3161  * members of the RX descriptor are invariant.  See notes above
3162  * tg3_alloc_rx_skb for full details.
3163  */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	/* Select source/destination descriptors and their shadow ring_info
	 * entries in the ring named by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;
	};

	/* Move skb ownership, the unmap cookie, and the DMA address over
	 * to the destination slot, leaving the source slot empty.
	 */
	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}
3200
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack via the VLAN-acceleration
 * receive path, using the group registered in tp->vlgrp.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3207
3208 /* The RX ring scheme is composed of multiple rings which post fresh
3209  * buffers to the chip, and one special ring the chip uses to report
3210  * status back to the host.
3211  *
3212  * The special ring reports the status of received packets to the
3213  * host.  The chip does not write into the original descriptor the
3214  * RX buffer was obtained from.  The chip simply takes the original
3215  * descriptor as provided by the host, updates the status and length
3216  * field, then writes this into the next status ring entry.
3217  *
3218  * Each ring the host uses to post buffers to the chip is described
3219  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3220  * it is first placed into the on-chip ram.  When the packet's length
3221  * is known, it walks down the TG3_BDINFO entries to select the ring.
3222  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3223  * which is within the range of the new packet's length is chosen.
3224  *
3225  * The "separate ring for rx status" scheme may sound queer, but it makes
3226  * sense from a cache coherency perspective.  If only the host writes
3227  * to the buffer post rings, and only the chip writes to the rx status
3228  * rings, then cache lines never move beyond shared-modified state.
3229  * If both the host and chip were to write into the same ring, cache line
3230  * eviction could occur since both entities want it in an exclusive state.
3231  */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	/* hw_idx is the chip's producer index in the RX return ring,
	 * read from the DMA'd status block.
	 */
	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which buffer ring this
		 * completion came from, and the slot within it.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring type: skip without reposting. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large frame: pass the existing buffer up the stack
			 * and post a freshly allocated one in its slot.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy it into a right-sized skb and
			 * recycle the original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip flags a
		 * verified TCP/UDP checksum (0xffff).
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Kick the standard-ring producer mailbox periodically so
		 * the chip does not run out of buffers during long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return received;
}
3387
/* NAPI poll callback.  Returns 0 when all work is done (and the device
 * has been removed from the poll list), 1 to stay scheduled.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before servicing so a
			 * new event arriving meanwhile is not lost.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			/* TX ring corruption detected: stop polling and
			 * let the reset task recover the chip.
			 */
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		/* Remember which status-block generation we processed;
		 * consumed later when interrupts are re-enabled.
		 */
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3449
/* Prevent the hard IRQ handlers from scheduling NAPI (they check
 * tg3_irq_sync()) and wait for any handler already running on another
 * CPU to finish.  Must not be called while already quiesced.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish irq_sync before waiting on in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3459
/* Nonzero while interrupt processing is quiesced via tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3464
3465 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3466  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3467  * with as well.  Most of the time, this is not necessary except when
3468  * shutting down the device.
3469  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Optionally quiesce the IRQ handler first (see comment above),
	 * then exclude all BH-context users of tp->lock.
	 */
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
}
3476
/* Counterpart of tg3_full_lock().  Note it does not undo IRQ quiescing;
 * callers reset tp->irq_sync themselves (e.g. tg3_restart_hw).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3481
3482 /* One-shot MSI handler - Chip automatically disables interrupt
3483  * after sending MSI so driver doesn't have to do it.
3484  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll routine will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling while the driver is quiescing interrupts. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3498
3499 /* MSI ISR - No need to check for interrupt sharing and no need to
3500  * flush status block and interrupt mailbox. PCI ordering rules
3501  * guarantee that MSI will arrive after the status block.
3502  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll routine will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Skip scheduling while the driver is quiescing interrupts. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3523
/* INTx interrupt handler (non-tagged status blocks). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Driver is quiescing: leave irqs masked, do not poll. */
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3566
/* INTx interrupt handler for chips using tagged status blocks; new work
 * is detected by comparing the status tag against the last one seen.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Driver is quiescing: leave irqs masked, do not poll. */
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3608
3609 /* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the chip
	 * reports INTA asserted, then disable chip interrupts so the
	 * interrupt self-test sees at most one firing.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3623
3624 static int tg3_init_hw(struct tg3 *, int);
3625 static int tg3_halt(struct tg3 *, int, int);
3626
3627 /* Restart hardware after configuration changes, self-test, etc.
3628  * Invoked with tp->lock held.
3629  */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		/* Re-init failed: halt the chip and close the device.
		 * The full lock is dropped across dev_close() and retaken
		 * before returning, so the caller's locking state holds.
		 */
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3648
3649 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: service the device as if its interrupt fired. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
3656 #endif
3657
/* Process-context worker that fully resets and re-initializes the chip
 * (scheduled from tg3_tx_timeout and the TX-recovery path).
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	/* Mark a reset as in progress. */
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Stop TX/NAPI activity with the lock dropped, then retake it
	 * with the IRQ handler fully quiesced (irq_sync = 1).
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Engage the MMIO write-reorder workaround requested by
		 * tg3_tx_recover(): use flushing mailbox writers from now on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3702
/* Watchdog callback: the stack saw no TX completions in time. */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	/* Defer the actual chip reset to process context. */
	schedule_work(&tp->reset_task);
}
3712
3713 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3714 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3715 {
3716         u32 base = (u32) mapping & 0xffffffff;
3717
3718         return ((base > 0xffffdcc0) &&
3719                 (base + len + 8 < base));
3720 }
3721
3722 /* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips with the 40-bit DMA bug are limited; everything else
	 * can address the full range this config can hand out.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	/* Other configs cannot produce DMA addresses above 40 bits. */
	return 0;
#endif
}
3734
3735 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3736
3737 /* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	/* Replace an already-mapped skb whose DMA layout trips a hardware
	 * bug with a freshly allocated linear copy, then unmap and clear
	 * the original sw ring entries in [*start, last_plus_one).
	 * Returns 0 on success, -1 if the packet had to be dropped.
	 */
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Slot 0 carried the linear head; later slots carried frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* The first slot now owns the replacement skb
			 * (NULL if the copy was dropped above).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3794
3795 static void tg3_set_txd(struct tg3 *tp, int entry,
3796                         dma_addr_t mapping, int len, u32 flags,
3797                         u32 mss_and_is_end)
3798 {
3799         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3800         int is_end = (mss_and_is_end & 0x1);
3801         u32 mss = (mss_and_is_end >> 1);
3802         u32 vlan_tag = 0;
3803
3804         if (is_end)
3805                 flags |= TXD_FLAG_END;
3806         if (flags & TXD_FLAG_VLAN) {
3807                 vlan_tag = flags >> 16;
3808                 flags &= 0xffff;
3809         }
3810         vlan_tag |= (mss << TXD_MSS_SHIFT);
3811
3812         txd->addr_hi = ((u64) mapping >> 32);
3813         txd->addr_lo = ((u64) mapping & 0xffffffff);
3814         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3815         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3816 }
3817
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Returns NETDEV_TX_OK (skb consumed) or NETDEV_TX_BUSY (ring full,
 * caller requeues).  Runs under netif_tx_lock with BHs disabled.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path needs to scribble on the headers below, so
		 * make sure we own them.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* The header length is packed into the upper bits of the
		 * mss word (<< 9, i.e. bits above the MSS value) for the
		 * hardware TSO engine.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			tcp_opt_len = ((skb->h.th->doff - 5) * 4);
			ip_tcp_len = (skb->nh.iph->ihl * 4) +
				     sizeof(struct tcphdr);

			/* Pre-clear the IP checksum and set tot_len to the
			 * per-segment size; hardware fixes up each segment.
			 */
			skb->nh.iph->check = 0;
			skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
						     tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Hardware computes the TCP checksum for each segment. */
		skb->h.th->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
	mss = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* Only the first descriptor records the skb; tg3_tx() uses that
	 * to know when the whole packet has been reclaimed.
	 */
	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Stop the queue if a max-fragment packet no longer fits;
		 * re-wake immediately if reclaim already freed enough.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	/* Order the mailbox write before the lock release on archs that
	 * need it (see mmiowb documentation).
	 */
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3943
#if TG3_TSO_SUPPORT != 0
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * The oversized-header skb is segmented in software and each resulting
 * segment is handed back to tg3_start_xmit_dma_bug() individually.
 * The original skb is always freed here (even on segmentation error).
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		return NETDEV_TX_BUSY;
	}

	/* Segment with TSO masked off so GSO does the split in software. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	/* Walk the segment list, detaching and transmitting one at a time. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
#endif
3977
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit() but additionally checks every DMA mapping for
 * the 4GB-boundary and 40-bit address hardware erratum, falling back to
 * tigon3_dma_hwbug_workaround() (bounce copy) when a mapping would
 * trigger it.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO path modifies the headers below; make sure we own them. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trip a TSO hardware bug on some
		 * chips; punt those packets to software GSO.
		 * (NOTE(review): unlikely() wraps only the first clause of
		 * the &&; functionally equivalent, just unusual style.)
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Pre-clear IP checksum; set tot_len to one segment's size. */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Firmware TSO wants the pseudo-header checksum
			 * seeded into the TCP checksum field.
			 */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode IP/TCP option lengths where this chip expects
		 * them: in the mss word for HW TSO and 5705, otherwise in
		 * base_flags.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* DMA crossing a 4GB boundary triggers the hardware erratum. */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Check each fragment mapping for both errata. */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet so the
		 * workaround can re-queue it from a bounce buffer.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	/* Flush the mailbox write before releasing the tx lock. */
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4158
4159 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4160                                int new_mtu)
4161 {
4162         dev->mtu = new_mtu;
4163
4164         if (new_mtu > ETH_DATA_LEN) {
4165                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4166                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4167                         ethtool_op_set_tso(dev, 0);
4168                 }
4169                 else
4170                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4171         } else {
4172                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4173                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4174                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4175         }
4176 }
4177
4178 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4179 {
4180         struct tg3 *tp = netdev_priv(dev);
4181         int err;
4182
4183         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4184                 return -EINVAL;
4185
4186         if (!netif_running(dev)) {
4187                 /* We'll just catch it later when the
4188                  * device is up'd.
4189                  */
4190                 tg3_set_mtu(dev, tp, new_mtu);
4191                 return 0;
4192         }
4193
4194         tg3_netif_stop(tp);
4195
4196         tg3_full_lock(tp, 1);
4197
4198         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4199
4200         tg3_set_mtu(dev, tp, new_mtu);
4201
4202         err = tg3_restart_hw(tp, 0);
4203
4204         if (!err)
4205                 tg3_netif_start(tp);
4206
4207         tg3_full_unlock(tp);
4208
4209         return err;
4210 }
4211
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Unmap and free every posted standard-ring RX buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Same for the jumbo RX ring. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: only the first descriptor of a packet holds the skb;
	 * the following nr_frags entries hold fragment mappings, so each
	 * packet consumes 1 + nr_frags ring slots (with wrap-around).
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Head descriptor maps the linear skb data. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Fragment descriptors follow, possibly wrapping the ring. */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4283
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even a single RX buffer
 * could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class parts use oversized standard buffers instead of a
	 * jumbo ring when the MTU exceeds the standard Ethernet size.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* opaque echoes the ring id and index back in completions. */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  A partial allocation
	 * shrinks the pending count rather than failing outright.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4373
4374 /*
4375  * Must not be invoked with interrupt sources disabled and
4376  * the hardware shutdown down.
4377  */
4378 static void tg3_free_consistent(struct tg3 *tp)
4379 {
4380         kfree(tp->rx_std_buffers);
4381         tp->rx_std_buffers = NULL;
4382         if (tp->rx_std) {
4383                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4384                                     tp->rx_std, tp->rx_std_mapping);
4385                 tp->rx_std = NULL;
4386         }
4387         if (tp->rx_jumbo) {
4388                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4389                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4390                 tp->rx_jumbo = NULL;
4391         }
4392         if (tp->rx_rcb) {
4393                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4394                                     tp->rx_rcb, tp->rx_rcb_mapping);
4395                 tp->rx_rcb = NULL;
4396         }
4397         if (tp->tx_ring) {
4398                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4399                         tp->tx_ring, tp->tx_desc_mapping);
4400                 tp->tx_ring = NULL;
4401         }
4402         if (tp->hw_status) {
4403                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4404                                     tp->hw_status, tp->status_mapping);
4405                 tp->hw_status = NULL;
4406         }
4407         if (tp->hw_stats) {
4408                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4409                                     tp->hw_stats, tp->stats_mapping);
4410                 tp->hw_stats = NULL;
4411         }
4412 }
4413
4414 /*
4415  * Must not be invoked with interrupt sources disabled and
4416  * the hardware shutdown down.  Can sleep.
4417  */
4418 static int tg3_alloc_consistent(struct tg3 *tp)
4419 {
4420         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4421                                       (TG3_RX_RING_SIZE +
4422                                        TG3_RX_JUMBO_RING_SIZE)) +
4423                                      (sizeof(struct tx_ring_info) *
4424                                       TG3_TX_RING_SIZE),
4425                                      GFP_KERNEL);
4426         if (!tp->rx_std_buffers)
4427                 return -ENOMEM;
4428
4429         memset(tp->rx_std_buffers, 0,
4430                (sizeof(struct ring_info) *
4431                 (TG3_RX_RING_SIZE +
4432                  TG3_RX_JUMBO_RING_SIZE)) +
4433                (sizeof(struct tx_ring_info) *
4434                 TG3_TX_RING_SIZE));
4435
4436         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4437         tp->tx_buffers = (struct tx_ring_info *)
4438                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4439
4440         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4441                                           &tp->rx_std_mapping);
4442         if (!tp->rx_std)
4443                 goto err_out;
4444
4445         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4446                                             &tp->rx_jumbo_mapping);
4447
4448         if (!tp->rx_jumbo)
4449                 goto err_out;
4450
4451         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4452                                           &tp->rx_rcb_mapping);
4453         if (!tp->rx_rcb)
4454                 goto err_out;
4455
4456         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4457                                            &tp->tx_desc_mapping);
4458         if (!tp->tx_ring)
4459                 goto err_out;
4460
4461         tp->hw_status = pci_alloc_consistent(tp->pdev,
4462                                              TG3_HW_STATUS_SIZE,
4463                                              &tp->status_mapping);
4464         if (!tp->hw_status)
4465                 goto err_out;
4466
4467         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4468                                             sizeof(struct tg3_hw_stats),
4469                                             &tp->stats_mapping);
4470         if (!tp->hw_stats)
4471                 goto err_out;
4472
4473         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4474         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4475
4476         return 0;
4477
4478 err_out:
4479         tg3_free_consistent(tp);
4480         return -ENOMEM;
4481 }
4482
4483 #define MAX_WAIT_CNT 1000
4484
4485 /* To stop a block, clear the enable bit and poll till it
4486  * clears.  tp->lock is held.
4487  */
4488 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4489 {
4490         unsigned int i;
4491         u32 val;
4492
4493         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4494                 switch (ofs) {
4495                 case RCVLSC_MODE:
4496                 case DMAC_MODE:
4497                 case MBFREE_MODE:
4498                 case BUFMGR_MODE:
4499                 case MEMARB_MODE:
4500                         /* We can't enable/disable these bits of the
4501                          * 5705/5750, just say success.
4502                          */
4503                         return 0;
4504
4505                 default:
4506                         break;
4507                 };
4508         }
4509
4510         val = tr32(ofs);
4511         val &= ~enable_bit;
4512         tw32_f(ofs, val);
4513
4514         for (i = 0; i < MAX_WAIT_CNT; i++) {
4515                 udelay(100);
4516                 val = tr32(ofs);
4517                 if ((val & enable_bit) == 0)
4518                         break;
4519         }
4520
4521         if (i == MAX_WAIT_CNT && !silent) {
4522                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4523                        "ofs=%lx enable_bit=%x\n",
4524                        ofs, enable_bit);
4525                 return -ENODEV;
4526         }
4527
4528         return 0;
4529 }
4530
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, then shut down the RX path,
 * the TX/DMA engines, the MAC, and finally the host-coherency and
 * buffer-manager blocks, in that order.  Accumulates any
 * tg3_stop_block() failures into the return value (0 or negative).
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new RX traffic first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Shut down the send-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll for it to settle. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear stale status/stats now that the chip is quiet. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4593
4594 /* tp->lock is held. */
4595 static int tg3_nvram_lock(struct tg3 *tp)
4596 {
4597         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4598                 int i;
4599
4600                 if (tp->nvram_lock_cnt == 0) {
4601                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4602                         for (i = 0; i < 8000; i++) {
4603                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4604                                         break;
4605                                 udelay(20);
4606                         }
4607                         if (i == 8000) {
4608                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4609                                 return -ENODEV;
4610                         }
4611                 }
4612                 tp->nvram_lock_cnt++;
4613         }
4614         return 0;
4615 }
4616
/* tp->lock is held. */
/* Drop one reference on the NVRAM arbitration semaphore; the hardware
 * grant is released (with a posted-write flush) only when the last
 * holder unlocks.
 */
static void tg3_nvram_unlock(struct tg3 *tp)
{
        if (tp->tg3_flags & TG3_FLAG_NVRAM) {
                if (tp->nvram_lock_cnt > 0)
                        tp->nvram_lock_cnt--;
                if (tp->nvram_lock_cnt == 0)
                        tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
        }
}
4627
/* tp->lock is held. */
/* On 5750-and-later parts without protected NVRAM, NVRAM register
 * access must be explicitly enabled: set ACCESS_ENABLE via
 * read-modify-write of NVRAM_ACCESS.
 */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
            !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);

                tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
        }
}
4638
/* tp->lock is held. */
/* Counterpart of tg3_enable_nvram_access(): clear ACCESS_ENABLE on
 * 5750-and-later parts without protected NVRAM.
 */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
            !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);

                tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
        }
}
4649
4650 /* tp->lock is held. */
4651 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4652 {
4653         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4654                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4655
4656         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4657                 switch (kind) {
4658                 case RESET_KIND_INIT:
4659                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4660                                       DRV_STATE_START);
4661                         break;
4662
4663                 case RESET_KIND_SHUTDOWN:
4664                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4665                                       DRV_STATE_UNLOAD);
4666                         break;
4667
4668                 case RESET_KIND_SUSPEND:
4669                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4670                                       DRV_STATE_SUSPEND);
4671                         break;
4672
4673                 default:
4674                         break;
4675                 };
4676         }
4677 }
4678
4679 /* tp->lock is held. */
4680 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4681 {
4682         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4683                 switch (kind) {
4684                 case RESET_KIND_INIT:
4685                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4686                                       DRV_STATE_START_DONE);
4687                         break;
4688
4689                 case RESET_KIND_SHUTDOWN:
4690                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4691                                       DRV_STATE_UNLOAD_DONE);
4692                         break;
4693
4694                 default:
4695                         break;
4696                 };
4697         }
4698 }
4699
4700 /* tp->lock is held. */
4701 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4702 {
4703         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4704                 switch (kind) {
4705                 case RESET_KIND_INIT:
4706                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4707                                       DRV_STATE_START);
4708                         break;
4709
4710                 case RESET_KIND_SHUTDOWN:
4711                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4712                                       DRV_STATE_UNLOAD);
4713                         break;
4714
4715                 case RESET_KIND_SUSPEND:
4716                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4717                                       DRV_STATE_SUSPEND);
4718                         break;
4719
4720                 default:
4721                         break;
4722                 };
4723         }
4724 }
4725
/* Wait for the on-chip firmware to signal initialization complete
 * after a reset.  Returns 0 on success (or when no firmware is
 * fitted), -ENODEV only for the 5906 VCPU timeout case.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        /* 5906 uses an on-chip VCPU; poll its status register instead
         * of the SRAM mailbox.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                /* Firmware acks by writing back the complemented magic. */
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 &&
            !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
                tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

                printk(KERN_INFO PFX "%s: No firmware running.\n",
                       tp->dev->name);
        }

        return 0;
}
4764
4765 static void tg3_stop_fw(struct tg3 *);
4766
/* tp->lock is held. */
/* Perform a full core-clock reset of the chip and re-establish the
 * minimal register/PCI state needed afterwards: indirect access,
 * PCI retry policy, PCI-X ordering, MSI (5780 class), memory
 * arbiter, GRC mode, MAC port mode, and finally wait for firmware
 * and reprobe the ASF enable state from NVRAM-shadowed SRAM.
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* Clear the fastboot program counter so these chips boot
         * through the normal (slow) path after reset.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        /* 5906: flag a driver-initiated reset to the VCPU and release
         * it from halt so it can run its boot code after the reset.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        pci_restore_state(tp->pdev);

        /* Make sure PCI-X relaxed ordering bit is clear. */
        pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
        val &= ~PCIX_CAPS_RELAXED_ORDERING;
        pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
                u32 val;

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }

                val = tr32(MEMARB_MODE);
                tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        } else
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

        /* 5750 A3 workaround: stop firmware before poking reg 0x5000.
         * NOTE(review): magic register/value inherited from vendor
         * code; meaning not documented here.
         */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                u32 val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Re-select the MAC port mode (TBI for fiber SERDES, GMII for
         * MII-attached SERDES, otherwise leave it for link negotiation).
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                u32 val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
4966
/* tp->lock is held. */
/* Ask the ASF management firmware to pause: post FWCMD_NICDRV_PAUSE_FW
 * in the firmware command mailbox and raise RX cpu event bit 14, then
 * poll briefly (up to ~100us) for the firmware to ack by clearing the
 * event bit.  Best-effort: a timeout is silently ignored.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                u32 val;
                int i;

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
                val = tr32(GRC_RX_CPU_EVENT);
                val |= (1 << 14);
                tw32(GRC_RX_CPU_EVENT, val);

                /* Wait for RX cpu to ACK the event.  */
                for (i = 0; i < 100; i++) {
                        if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
                                break;
                        udelay(1);
                }
        }
}
4987
/* tp->lock is held. */
/* Quiesce and reset the chip: pause the management firmware, signal
 * the pre-reset state for @kind, abort all hardware activity, reset
 * the chip, then write the legacy and post-reset signatures.  The
 * post-reset signatures are written even when the reset itself
 * failed.  Returns 0 on success or the tg3_chip_reset() error.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int reset_err;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        reset_err = tg3_chip_reset(tp);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return reset_err;
}
5008
5009 #define TG3_FW_RELEASE_MAJOR    0x0
5010 #define TG3_FW_RELASE_MINOR     0x0
5011 #define TG3_FW_RELEASE_FIX      0x0
5012 #define TG3_FW_START_ADDR       0x08000000
5013 #define TG3_FW_TEXT_ADDR        0x08000000
5014 #define TG3_FW_TEXT_LEN         0x9c0
5015 #define TG3_FW_RODATA_ADDR      0x080009c0
5016 #define TG3_FW_RODATA_LEN       0x60
5017 #define TG3_FW_DATA_ADDR        0x08000a40
5018 #define TG3_FW_DATA_LEN         0x20
5019 #define TG3_FW_SBSS_ADDR        0x08000a60
5020 #define TG3_FW_SBSS_LEN         0xc
5021 #define TG3_FW_BSS_ADDR         0x08000a70
5022 #define TG3_FW_BSS_LEN          0x10
5023
/* MIPS firmware image (.text section) for the 5701 A0 workaround,
 * loaded into the RX/TX cpu scratch memory by
 * tg3_load_5701_a0_firmware_fix().  Opaque vendor data — do not edit.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
        0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
        0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
        0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
        0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
        0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
        0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
        0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
        0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
        0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
        0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
        0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
        0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
        0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
        0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
        0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
        0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
        0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
        0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
        0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
        0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
        0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
        0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
        0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
        0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
        0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
        0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
        0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
        0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
        0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
        0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
        0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
        0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
        0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
        0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
        0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
        0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
        0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
        0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
        0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
        0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
        0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
        0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
        0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
        0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
        0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
        0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
        0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
        0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
        0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
        0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
        0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
        0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
        0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
        0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
        0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
        0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
        0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
        0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
        0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
        0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
        0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5117
/* Firmware .rodata section; the words are little-endian ASCII string
 * constants referenced by the firmware (presumably event/error names
 * such as "5701Asrl" — NOTE(review): content is opaque vendor data).
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
        0x00000000
};
5125
5126 #if 0 /* All zeros, don't eat up space with it. */
5127 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5128         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5129         0x00000000, 0x00000000, 0x00000000, 0x00000000
5130 };
5131 #endif
5132
5133 #define RX_CPU_SCRATCH_BASE     0x30000
5134 #define RX_CPU_SCRATCH_SIZE     0x04000
5135 #define TX_CPU_SCRATCH_BASE     0x34000
5136 #define TX_CPU_SCRATCH_SIZE     0x04000
5137
/* tp->lock is held. */
/* Halt the embedded cpu at @offset (RX_CPU_BASE or TX_CPU_BASE).
 * On 5906 the VCPU is halted via GRC_VCPU_EXT_CTRL instead.
 * Returns 0 on success, -ENODEV if the cpu never reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
        int i;

        /* 5705+ chips have no TX cpu; asking to halt it is a driver bug. */
        BUG_ON(offset == TX_CPU_BASE &&
            (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);

                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
                return 0;
        }
        if (offset == RX_CPU_BASE) {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }

                /* RX cpu only: issue one final halt request with a
                 * posted-write flush and a settle delay.
                 */
                tw32(offset + CPU_STATE, 0xffffffff);
                tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
                udelay(10);
        } else {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }
        }

        if (i >= 10000) {
                printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
                       "and %s CPU\n",
                       tp->dev->name,
                       (offset == RX_CPU_BASE ? "RX" : "TX"));
                return -ENODEV;
        }

        /* Clear firmware's nvram arbitration. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
}
5185
/* Describes a firmware image to load into an on-chip cpu: base
 * address, byte length and data pointer for each of the .text,
 * .rodata and .data sections.  A NULL data pointer means the section
 * should be zero-filled (see tg3_load_firmware_cpu()).
 */
struct fw_info {
        unsigned int text_base;         /* .text load address (cpu view) */
        unsigned int text_len;          /* .text length in bytes */
        const u32 *text_data;           /* .text words, or NULL for zeros */
        unsigned int rodata_base;       /* .rodata load address */
        unsigned int rodata_len;        /* .rodata length in bytes */
        const u32 *rodata_data;         /* .rodata words, or NULL for zeros */
        unsigned int data_base;         /* .data load address */
        unsigned int data_len;          /* .data length in bytes */
        const u32 *data_data;           /* .data words, or NULL for zeros */
};
5197
5198 /* tp->lock is held. */
5199 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5200                                  int cpu_scratch_size, struct fw_info *info)
5201 {
5202         int err, lock_err, i;
5203         void (*write_op)(struct tg3 *, u32, u32);
5204
5205         if (cpu_base == TX_CPU_BASE &&
5206             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5207                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5208                        "TX cpu firmware on %s which is 5705.\n",
5209                        tp->dev->name);
5210                 return -EINVAL;
5211         }
5212
5213         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5214                 write_op = tg3_write_mem;
5215         else
5216                 write_op = tg3_write_indirect_reg32;
5217
5218         /* It is possible that bootcode is still loading at this point.
5219          * Get the nvram lock first before halting the cpu.
5220          */
5221         lock_err = tg3_nvram_lock(tp);
5222         err = tg3_halt_cpu(tp, cpu_base);
5223         if (!lock_err)
5224                 tg3_nvram_unlock(tp);
5225         if (err)
5226                 goto out;
5227
5228         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5229                 write_op(tp, cpu_scratch_base + i, 0);
5230         tw32(cpu_base + CPU_STATE, 0xffffffff);
5231         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5232         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5233                 write_op(tp, (cpu_scratch_base +
5234                               (info->text_base & 0xffff) +
5235                               (i * sizeof(u32))),
5236                          (info->text_data ?
5237                           info->text_data[i] : 0));
5238         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5239                 write_op(tp, (cpu_scratch_base +
5240                               (info->rodata_base & 0xffff) +
5241                               (i * sizeof(u32))),
5242                          (info->rodata_data ?
5243                           info->rodata_data[i] : 0));
5244         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5245                 write_op(tp, (cpu_scratch_base +
5246                               (info->data_base & 0xffff) +
5247                               (i * sizeof(u32))),
5248                          (info->data_data ?
5249                           info->data_data[i] : 0));
5250
5251         err = 0;
5252
5253 out:
5254         return err;
5255 }
5256
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX cpu
 * scratch areas, then start only the RX cpu and verify that its
 * program counter latched the firmware entry point.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
        struct fw_info info;
        int err, i;

        info.text_base = TG3_FW_TEXT_ADDR;
        info.text_len = TG3_FW_TEXT_LEN;
        info.text_data = &tg3FwText[0];
        info.rodata_base = TG3_FW_RODATA_ADDR;
        info.rodata_len = TG3_FW_RODATA_LEN;
        info.rodata_data = &tg3FwRodata[0];
        info.data_base = TG3_FW_DATA_ADDR;
        info.data_len = TG3_FW_DATA_LEN;
        info.data_data = NULL;  /* .data section is all zeros */

        err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
                                    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
                                    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        /* Now startup only the RX cpu. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

        /* Retry up to 5 times for the PC to stick at the entry point. */
        for (i = 0; i < 5; i++) {
                if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
                        break;
                tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
                tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
                       "to set RX CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
                       TG3_FW_TEXT_ADDR);
                return -ENODEV;
        }
        /* Release the RX cpu from halt so it starts executing. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

        return 0;
}
5309
5310 #if TG3_TSO_SUPPORT != 0
5311
/*
 * Memory layout of the TSO offload firmware image (version 1.6.0 per
 * the RELEASE/MINOR/FIX macros below, matching the "stkoffld_v1.6.0"
 * string embedded in tg3TsoFwData[]).  The *_ADDR values are addresses
 * in the on-chip CPU's address space; the *_LEN values are segment
 * sizes in bytes.
 *
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a typo for
 * "RELEASE".  Kept as-is: renaming the macro could break references
 * elsewhere in the file outside this view.
 */
5312 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5313 #define TG3_TSO_FW_RELASE_MINOR         0x6
5314 #define TG3_TSO_FW_RELEASE_FIX          0x0
5315 #define TG3_TSO_FW_START_ADDR           0x08000000
5316 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5317 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5318 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5319 #define TG3_TSO_FW_RODATA_LEN           0x60
5320 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5321 #define TG3_TSO_FW_DATA_LEN             0x30
5322 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5323 #define TG3_TSO_FW_SBSS_LEN             0x2c
5324 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5325 #define TG3_TSO_FW_BSS_LEN              0x894
5326
/*
 * .text segment of the TSO offload firmware, stored as 32-bit words
 * and sized by TG3_TSO_FW_TEXT_LEN.  This is an opaque machine-code
 * image for the chip's on-board CPU (the word patterns — e.g.
 * 0x27bdffe0 / 0x03e00008 prologue/return pairs — look like MIPS
 * instructions; TODO confirm).  Do NOT edit by hand: the words must be
 * kept byte-for-byte identical to the blob the license header covers.
 */
5327 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5328         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5329         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5330         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5331         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5332         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5333         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5334         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5335         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5336         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5337         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5338         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5339         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5340         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5341         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5342         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5343         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5344         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5345         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5346         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5347         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5348         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5349         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5350         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5351         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5352         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5353         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5354         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5355         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5356         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5357         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5358         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5359         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5360         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5361         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5362         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5363         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5364         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5365         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5366         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5367         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5368         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5369         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5370         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5371         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5372         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5373         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5374         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5375         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5376         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5377         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5378         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5379         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5380         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5381         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5382         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5383         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5384         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5385         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5386         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5387         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5388         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5389         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5390         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5391         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5392         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5393         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5394         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5395         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5396         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5397         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5398         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5399         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5400         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5401         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5402         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5403         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5404         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5405         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5406         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5407         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5408         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5409         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5410         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5411         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5412         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5413         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5414         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5415         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5416         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5417         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5418         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5419         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5420         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5421         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5422         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5423         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5424         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5425         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5426         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5427         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5428         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5429         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5430         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5431         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5432         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5433         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5434         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5435         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5436         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5437         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5438         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5439         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5440         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5441         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5442         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5443         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5444         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5445         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5446         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5447         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5448         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5449         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5450         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5451         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5452         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5453         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5454         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5455         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5456         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5457         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5458         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5459         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5460         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5461         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5462         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5463         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5464         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5465         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5466         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5467         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5468         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5469         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5470         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5471         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5472         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5473         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5474         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5475         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5476         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5477         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5478         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5479         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5480         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5481         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5482         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5483         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5484         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5485         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5486         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5487         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5488         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5489         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5490         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5491         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5492         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5493         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5494         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5495         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5496         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5497         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5498         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5499         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5500         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5501         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5502         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5503         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5504         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5505         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5506         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5507         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5508         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5509         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5510         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5511         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5512         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5513         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5514         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5515         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5516         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5517         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5518         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5519         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5520         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5521         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5522         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5523         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5524         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5525         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5526         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5527         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5528         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5529         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5530         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5531         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5532         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5533         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5534         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5535         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5536         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5537         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5538         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5539         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5540         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5541         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5542         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5543         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5544         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5545         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5546         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5547         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5548         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5549         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5550         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5551         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5552         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5553         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5554         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5555         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5556         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5557         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5558         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5559         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5560         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5561         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5562         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5563         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5564         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5565         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5566         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5567         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5568         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5569         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5570         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5571         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5572         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5573         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5574         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5575         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5576         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5577         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5578         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5579         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5580         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5581         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5582         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5583         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5584         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5585         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5586         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5587         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5588         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5589         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5590         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5591         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5592         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5593         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5594         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5595         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5596         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5597         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5598         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5599         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5600         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5601         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5602         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5603         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5604         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5605         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5606         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5607         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5608         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5609         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5610         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5611         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5612 };
5613
/*
 * .rodata segment of the TSO firmware (TG3_TSO_FW_RODATA_LEN bytes).
 * The words are packed big-endian ASCII diagnostic strings:
 * "MainCpuB", "MainCpuA", "stkoffldIn", "stkoff**", "SwEvent0",
 * "fatalErr" — NUL-padded.  Data blob; do not edit by hand.
 */
5614 static const u32 tg3TsoFwRodata[] = {
5615         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5616         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5617         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5618         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5619         0x00000000,
5620 };
5621
/*
 * Initialized .data segment of the TSO firmware (TG3_TSO_FW_DATA_LEN
 * bytes).  Words 1-4 spell the big-endian ASCII tag "stkoffld_v1.6.0",
 * which matches the TG3_TSO_FW_RELEASE_* version macros (1.6.0).
 * Data blob; do not edit by hand.
 */
5622 static const u32 tg3TsoFwData[] = {
5623         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5624         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5625         0x00000000,
5626 };
5627
5628 /* 5705 needs a special version of the TSO firmware.  */
/*
 * Memory layout of the 5705-specific TSO firmware image (version 1.2.0
 * per the RELEASE/MINOR/FIX macros).  Same segment scheme as the
 * TG3_TSO_FW_* block above, but loaded at a different base address.
 *
 * NOTE(review): "RELASE" in TG3_TSO5_FW_RELASE_MINOR is the same
 * "RELEASE" typo as in the non-5705 block; kept as-is so any external
 * references keep compiling.
 */
5629 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5630 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5631 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5632 #define TG3_TSO5_FW_START_ADDR          0x00010000
5633 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5634 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5635 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5636 #define TG3_TSO5_FW_RODATA_LEN          0x50
5637 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5638 #define TG3_TSO5_FW_DATA_LEN            0x20
5639 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5640 #define TG3_TSO5_FW_SBSS_LEN            0x28
5641 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5642 #define TG3_TSO5_FW_BSS_LEN             0x88
5643
5644 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5645         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5646         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5647         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5648         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5649         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5650         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5651         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5652         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5653         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5654         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5655         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5656         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5657         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5658         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5659         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5660         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5661         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5662         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5663         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5664         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5665         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5666         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5667         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5668         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5669         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5670         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5671         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5672         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5673         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5674         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5675         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5676         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5677         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5678         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5679         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5680         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5681         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5682         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5683         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5684         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5685         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5686         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5687         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5688         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5689         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5690         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5691         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5692         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5693         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5694         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5695         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5696         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5697         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5698         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5699         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5700         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5701         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5702         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5703         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5704         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5705         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5706         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5707         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5708         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5709         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5710         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5711         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5712         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5713         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5714         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5715         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5716         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5717         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5718         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5719         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5720         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5721         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5722         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5723         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5724         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5725         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5726         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5727         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5728         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5729         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5730         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5731         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5732         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5733         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5734         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5735         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5736         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5737         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5738         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5739         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5740         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5741         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5742         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5743         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5744         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5745         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5746         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5747         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5748         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5749         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5750         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5751         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5752         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5753         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5754         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5755         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5756         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5757         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5758         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5759         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5760         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5761         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5762         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5763         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5764         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5765         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5766         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5767         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5768         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5769         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5770         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5771         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5772         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5773         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5774         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5775         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5776         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5777         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5778         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5779         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5780         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5781         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5782         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5783         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5784         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5785         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5786         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5787         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5788         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5789         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5790         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5791         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5792         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5793         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5794         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5795         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5796         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5797         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5798         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5799         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5800         0x00000000, 0x00000000, 0x00000000,
5801 };
5802
/* Read-only data segment of the 5705 TSO firmware.  The words decode to
 * ASCII tags used by the firmware ("MainCpuB", "MainCpuA", "stkoffld",
 * "fatalErr").  Copied into NIC SRAM at TG3_TSO5_FW_RODATA_ADDR by
 * tg3_load_tso_firmware(); do not modify.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5809
/* Initialized data segment of the 5705 TSO firmware.  The non-zero words
 * decode to the ASCII version tag "stkoffld_v1.2.0".  Copied into NIC
 * SRAM at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware(); do not
 * modify.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5814
/* tp->lock is held. */
/* Download the TSO offload firmware into the appropriate on-chip CPU
 * and start it executing at the firmware's text base address.
 *
 * Chips with TSO implemented in hardware (TG3_FLG2_HW_TSO) need no
 * firmware at all, so this returns 0 immediately for them.  On the
 * 5705 the image runs on the RX CPU and its scratch area is carved out
 * of the mbuf-pool SRAM; on all other supported chips it runs on the
 * TX CPU's dedicated scratch memory.
 *
 * Returns 0 on success, or a negative errno if the image could not be
 * loaded or the CPU would not start at the expected program counter.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware TSO: nothing to download. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705 variant: smaller image, runs on the RX CPU. */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch must cover all loaded segments plus the
		 * firmware's sbss/bss working areas.
		 */
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other chips: full image, runs on the TX CPU. */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Verify the CPU took the new program counter; retry up to five
	 * times, halting the CPU and re-writing the PC each attempt.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear the halt bit so the CPU starts running the firmware. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5886
5887 #endif /* TG3_TSO_SUPPORT != 0 */
5888
5889 /* tp->lock is held. */
5890 static void __tg3_set_mac_addr(struct tg3 *tp)
5891 {
5892         u32 addr_high, addr_low;
5893         int i;
5894
5895         addr_high = ((tp->dev->dev_addr[0] << 8) |
5896                      tp->dev->dev_addr[1]);
5897         addr_low = ((tp->dev->dev_addr[2] << 24) |
5898                     (tp->dev->dev_addr[3] << 16) |
5899                     (tp->dev->dev_addr[4] <<  8) |
5900                     (tp->dev->dev_addr[5] <<  0));
5901         for (i = 0; i < 4; i++) {
5902                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5903                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5904         }
5905
5906         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5907             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5908                 for (i = 0; i < 12; i++) {
5909                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5910                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5911                 }
5912         }
5913
5914         addr_high = (tp->dev->dev_addr[0] +
5915                      tp->dev->dev_addr[1] +
5916                      tp->dev->dev_addr[2] +
5917                      tp->dev->dev_addr[3] +
5918                      tp->dev->dev_addr[4] +
5919                      tp->dev->dev_addr[5]) &
5920                 TX_BACKOFF_SEED_MASK;
5921         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5922 }
5923
5924 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5925 {
5926         struct tg3 *tp = netdev_priv(dev);
5927         struct sockaddr *addr = p;
5928         int err = 0;
5929
5930         if (!is_valid_ether_addr(addr->sa_data))
5931                 return -EINVAL;
5932
5933         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5934
5935         if (!netif_running(dev))
5936                 return 0;
5937
5938         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5939                 /* Reset chip so that ASF can re-init any MAC addresses it
5940                  * needs.
5941                  */
5942                 tg3_netif_stop(tp);
5943                 tg3_full_lock(tp, 1);
5944
5945                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5946                 err = tg3_restart_hw(tp, 0);
5947                 if (!err)
5948                         tg3_netif_start(tp);
5949                 tg3_full_unlock(tp);
5950         } else {
5951                 spin_lock_bh(&tp->lock);
5952                 __tg3_set_mac_addr(tp);
5953                 spin_unlock_bh(&tp->lock);
5954         }
5955
5956         return err;
5957 }
5958
5959 /* tp->lock is held. */
5960 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5961                            dma_addr_t mapping, u32 maxlen_flags,
5962                            u32 nic_addr)
5963 {
5964         tg3_write_mem(tp,
5965                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5966                       ((u64) mapping >> 32));
5967         tg3_write_mem(tp,
5968                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5969                       ((u64) mapping & 0xffffffff));
5970         tg3_write_mem(tp,
5971                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5972                        maxlen_flags);
5973
5974         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5975                 tg3_write_mem(tp,
5976                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5977                               nic_addr);
5978 }
5979
5980 static void __tg3_set_rx_mode(struct net_device *);
5981 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5982 {
5983         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5984         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5985         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5986         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5987         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5988                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5989                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5990         }
5991         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5992         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5993         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5994                 u32 val = ec->stats_block_coalesce_usecs;
5995
5996                 if (!netif_carrier_ok(tp->dev))
5997                         val = 0;
5998
5999                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6000         }
6001 }
6002
6003 /* tp->lock is held. */
6004 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6005 {
6006         u32 val, rdmac_mode;
6007         int i, err, limit;
6008
6009         tg3_disable_ints(tp);
6010
6011         tg3_stop_fw(tp);
6012
6013         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6014
6015         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6016                 tg3_abort_hw(tp, 1);
6017         }
6018
6019         if (reset_phy)
6020                 tg3_phy_reset(tp);
6021
6022         err = tg3_chip_reset(tp);
6023         if (err)
6024                 return err;
6025
6026         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6027
6028         /* This works around an issue with Athlon chipsets on
6029          * B3 tigon3 silicon.  This bit has no effect on any
6030          * other revision.  But do not set this on PCI Express
6031          * chips.
6032          */
6033         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6034                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6035         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6036
6037         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6038             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6039                 val = tr32(TG3PCI_PCISTATE);
6040                 val |= PCISTATE_RETRY_SAME_DMA;
6041                 tw32(TG3PCI_PCISTATE, val);
6042         }
6043
6044         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6045                 /* Enable some hw fixes.  */
6046                 val = tr32(TG3PCI_MSI_DATA);
6047                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6048                 tw32(TG3PCI_MSI_DATA, val);
6049         }
6050
6051         /* Descriptor ring init may make accesses to the
6052          * NIC SRAM area to setup the TX descriptors, so we
6053          * can only do this after the hardware has been
6054          * successfully reset.
6055          */
6056         err = tg3_init_rings(tp);
6057         if (err)
6058                 return err;
6059
6060         /* This value is determined during the probe time DMA
6061          * engine test, tg3_test_dma.
6062          */
6063         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6064
6065         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6066                           GRC_MODE_4X_NIC_SEND_RINGS |
6067                           GRC_MODE_NO_TX_PHDR_CSUM |
6068                           GRC_MODE_NO_RX_PHDR_CSUM);
6069         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6070
6071         /* Pseudo-header checksum is done by hardware logic and not
6072          * the offload processers, so make the chip do the pseudo-
6073          * header checksums on receive.  For transmit it is more
6074          * convenient to do the pseudo-header checksum in software
6075          * as Linux does that on transmit for us in all cases.
6076          */
6077         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6078
6079         tw32(GRC_MODE,
6080              tp->grc_mode |
6081              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6082
6083         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6084         val = tr32(GRC_MISC_CFG);
6085         val &= ~0xff;
6086         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6087         tw32(GRC_MISC_CFG, val);
6088
6089         /* Initialize MBUF/DESC pool. */
6090         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6091                 /* Do nothing.  */
6092         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6093                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6094                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6095                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6096                 else
6097                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6098                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6099                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6100         }
6101 #if TG3_TSO_SUPPORT != 0
6102         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6103                 int fw_len;
6104
6105                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6106                           TG3_TSO5_FW_RODATA_LEN +
6107                           TG3_TSO5_FW_DATA_LEN +
6108                           TG3_TSO5_FW_SBSS_LEN +
6109                           TG3_TSO5_FW_BSS_LEN);
6110                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6111                 tw32(BUFMGR_MB_POOL_ADDR,
6112                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6113                 tw32(BUFMGR_MB_POOL_SIZE,
6114                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6115         }
6116 #endif
6117
6118         if (tp->dev->mtu <= ETH_DATA_LEN) {
6119                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6120                      tp->bufmgr_config.mbuf_read_dma_low_water);
6121                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6122                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6123                 tw32(BUFMGR_MB_HIGH_WATER,
6124                      tp->bufmgr_config.mbuf_high_water);
6125         } else {
6126                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6127                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6128                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6129                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6130                 tw32(BUFMGR_MB_HIGH_WATER,
6131                      tp->bufmgr_config.mbuf_high_water_jumbo);
6132         }
6133         tw32(BUFMGR_DMA_LOW_WATER,
6134              tp->bufmgr_config.dma_low_water);
6135         tw32(BUFMGR_DMA_HIGH_WATER,
6136              tp->bufmgr_config.dma_high_water);
6137
6138         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6139         for (i = 0; i < 2000; i++) {
6140                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6141                         break;
6142                 udelay(10);
6143         }
6144         if (i >= 2000) {
6145                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6146                        tp->dev->name);
6147                 return -ENODEV;
6148         }
6149
6150         /* Setup replenish threshold. */
6151         val = tp->rx_pending / 8;
6152         if (val == 0)
6153                 val = 1;
6154         else if (val > tp->rx_std_max_post)
6155                 val = tp->rx_std_max_post;
6156         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6157                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6158                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6159
6160                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6161                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6162         }
6163
6164         tw32(RCVBDI_STD_THRESH, val);
6165
6166         /* Initialize TG3_BDINFO's at:
6167          *  RCVDBDI_STD_BD:     standard eth size rx ring
6168          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6169          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6170          *
6171          * like so:
6172          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6173          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6174          *                              ring attribute flags
6175          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6176          *
6177          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6178          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6179          *
6180          * The size of each ring is fixed in the firmware, but the location is
6181          * configurable.
6182          */
6183         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6184              ((u64) tp->rx_std_mapping >> 32));
6185         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6186              ((u64) tp->rx_std_mapping & 0xffffffff));
6187         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6188              NIC_SRAM_RX_BUFFER_DESC);
6189
6190         /* Don't even try to program the JUMBO/MINI buffer descriptor
6191          * configs on 5705.
6192          */
6193         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6194                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6195                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6196         } else {
6197                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6198                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6199
6200                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6201                      BDINFO_FLAGS_DISABLED);
6202
6203                 /* Setup replenish threshold. */
6204                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6205
6206                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6207                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6208                              ((u64) tp->rx_jumbo_mapping >> 32));
6209                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6210                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6211                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6212                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6213                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6214                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6215                 } else {
6216                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6217                              BDINFO_FLAGS_DISABLED);
6218                 }
6219
6220         }
6221
6222         /* There is only one send ring on 5705/5750, no need to explicitly
6223          * disable the others.
6224          */
6225         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6226                 /* Clear out send RCB ring in SRAM. */
6227                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6228                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6229                                       BDINFO_FLAGS_DISABLED);
6230         }
6231
6232         tp->tx_prod = 0;
6233         tp->tx_cons = 0;
6234         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6235         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6236
6237         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6238                        tp->tx_desc_mapping,
6239                        (TG3_TX_RING_SIZE <<
6240                         BDINFO_FLAGS_MAXLEN_SHIFT),
6241                        NIC_SRAM_TX_BUFFER_DESC);
6242
6243         /* There is only one receive return ring on 5705/5750, no need
6244          * to explicitly disable the others.
6245          */
6246         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6247                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6248                      i += TG3_BDINFO_SIZE) {
6249                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6250                                       BDINFO_FLAGS_DISABLED);
6251                 }
6252         }
6253
6254         tp->rx_rcb_ptr = 0;
6255         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6256
6257         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6258                        tp->rx_rcb_mapping,
6259                        (TG3_RX_RCB_RING_SIZE(tp) <<
6260                         BDINFO_FLAGS_MAXLEN_SHIFT),
6261                        0);
6262
6263         tp->rx_std_ptr = tp->rx_pending;
6264         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6265                      tp->rx_std_ptr);
6266
6267         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6268                                                 tp->rx_jumbo_pending : 0;
6269         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6270                      tp->rx_jumbo_ptr);
6271
6272         /* Initialize MAC address and backoff seed. */
6273         __tg3_set_mac_addr(tp);
6274
6275         /* MTU + ethernet header + FCS + optional VLAN tag */
6276         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6277
6278         /* The slot time is changed by tg3_setup_phy if we
6279          * run at gigabit with half duplex.
6280          */
6281         tw32(MAC_TX_LENGTHS,
6282              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6283              (6 << TX_LENGTHS_IPG_SHIFT) |
6284              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6285
6286         /* Receive rules. */
6287         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6288         tw32(RCVLPC_CONFIG, 0x0181);
6289
6290         /* Calculate RDMAC_MODE setting early, we need it to determine
6291          * the RCVLPC_STATE_ENABLE mask.
6292          */
6293         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6294                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6295                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6296                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6297                       RDMAC_MODE_LNGREAD_ENAB);
6298         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6299                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6300
6301         /* If statement applies to 5705 and 5750 PCI devices only */
6302         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6303              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6304             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6305                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6306                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6307                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6308                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6309                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6310                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6311                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6312                 }
6313         }
6314
6315         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6316                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6317
6318 #if TG3_TSO_SUPPORT != 0
6319         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6320                 rdmac_mode |= (1 << 27);
6321 #endif
6322
6323         /* Receive/send statistics. */
6324         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6325                 val = tr32(RCVLPC_STATS_ENABLE);
6326                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6327                 tw32(RCVLPC_STATS_ENABLE, val);
6328         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6329                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6330                 val = tr32(RCVLPC_STATS_ENABLE);
6331                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6332                 tw32(RCVLPC_STATS_ENABLE, val);
6333         } else {
6334                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6335         }
6336         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6337         tw32(SNDDATAI_STATSENAB, 0xffffff);
6338         tw32(SNDDATAI_STATSCTRL,
6339              (SNDDATAI_SCTRL_ENABLE |
6340               SNDDATAI_SCTRL_FASTUPD));
6341
6342         /* Setup host coalescing engine. */
6343         tw32(HOSTCC_MODE, 0);
6344         for (i = 0; i < 2000; i++) {
6345                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6346                         break;
6347                 udelay(10);
6348         }
6349
6350         __tg3_set_coalesce(tp, &tp->coal);
6351
6352         /* set status block DMA address */
6353         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6354              ((u64) tp->status_mapping >> 32));
6355         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6356              ((u64) tp->status_mapping & 0xffffffff));
6357
6358         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6359                 /* Status/statistics block address.  See tg3_timer,
6360                  * the tg3_periodic_fetch_stats call there, and
6361                  * tg3_get_stats to see how this works for 5705/5750 chips.
6362                  */
6363                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6364                      ((u64) tp->stats_mapping >> 32));
6365                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6366                      ((u64) tp->stats_mapping & 0xffffffff));
6367                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6368                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6369         }
6370
6371         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6372
6373         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6374         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6375         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6376                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6377
6378         /* Clear statistics/status block in chip, and status block in ram. */
6379         for (i = NIC_SRAM_STATS_BLK;
6380              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6381              i += sizeof(u32)) {
6382                 tg3_write_mem(tp, i, 0);
6383                 udelay(40);
6384         }
6385         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6386
6387         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6388                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6389                 /* reset to prevent losing 1st rx packet intermittently */
6390                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6391                 udelay(10);
6392         }
6393
6394         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6395                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6396         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6397         udelay(40);
6398
6399         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6400          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6401          * register to preserve the GPIO settings for LOMs. The GPIOs,
6402          * whether used as inputs or outputs, are set by boot code after
6403          * reset.
6404          */
6405         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6406                 u32 gpio_mask;
6407
6408                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6409                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6410
6411                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6412                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6413                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6414
6415                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6416                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6417
6418                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6419
6420                 /* GPIO1 must be driven high for eeprom write protect */
6421                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6422                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6423         }
6424         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6425         udelay(100);
6426
6427         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6428         tp->last_tag = 0;
6429
6430         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6431                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6432                 udelay(40);
6433         }
6434
6435         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6436                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6437                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6438                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6439                WDMAC_MODE_LNGREAD_ENAB);
6440
6441         /* If statement applies to 5705 and 5750 PCI devices only */
6442         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6443              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6444             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6445                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6446                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6447                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6448                         /* nothing */
6449                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6450                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6451                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6452                         val |= WDMAC_MODE_RX_ACCEL;
6453                 }
6454         }
6455
6456         /* Enable host coalescing bug fix */
6457         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6458             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6459                 val |= (1 << 29);
6460
6461         tw32_f(WDMAC_MODE, val);
6462         udelay(40);
6463
6464         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6465                 val = tr32(TG3PCI_X_CAPS);
6466                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6467                         val &= ~PCIX_CAPS_BURST_MASK;
6468                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6469                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6470                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6471                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6472                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6473                                 val |= (tp->split_mode_max_reqs <<
6474                                         PCIX_CAPS_SPLIT_SHIFT);
6475                 }
6476                 tw32(TG3PCI_X_CAPS, val);
6477         }
6478
6479         tw32_f(RDMAC_MODE, rdmac_mode);
6480         udelay(40);
6481
6482         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6483         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6484                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6485         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6486         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6487         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6488         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6489         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6490 #if TG3_TSO_SUPPORT != 0
6491         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6492                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6493 #endif
6494         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6495         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6496
6497         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6498                 err = tg3_load_5701_a0_firmware_fix(tp);
6499                 if (err)
6500                         return err;
6501         }
6502
6503 #if TG3_TSO_SUPPORT != 0
6504         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6505                 err = tg3_load_tso_firmware(tp);
6506                 if (err)
6507                         return err;
6508         }
6509 #endif
6510
6511         tp->tx_mode = TX_MODE_ENABLE;
6512         tw32_f(MAC_TX_MODE, tp->tx_mode);
6513         udelay(100);
6514
6515         tp->rx_mode = RX_MODE_ENABLE;
6516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6517                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6518
6519         tw32_f(MAC_RX_MODE, tp->rx_mode);
6520         udelay(10);
6521
6522         if (tp->link_config.phy_is_low_power) {
6523                 tp->link_config.phy_is_low_power = 0;
6524                 tp->link_config.speed = tp->link_config.orig_speed;
6525                 tp->link_config.duplex = tp->link_config.orig_duplex;
6526                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6527         }
6528
6529         tp->mi_mode = MAC_MI_MODE_BASE;
6530         tw32_f(MAC_MI_MODE, tp->mi_mode);
6531         udelay(80);
6532
6533         tw32(MAC_LED_CTRL, tp->led_ctrl);
6534
6535         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6536         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6537                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6538                 udelay(10);
6539         }
6540         tw32_f(MAC_RX_MODE, tp->rx_mode);
6541         udelay(10);
6542
6543         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6544                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6545                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6546                         /* Set drive transmission level to 1.2V  */
6547                         /* only if the signal pre-emphasis bit is not set  */
6548                         val = tr32(MAC_SERDES_CFG);
6549                         val &= 0xfffff000;
6550                         val |= 0x880;
6551                         tw32(MAC_SERDES_CFG, val);
6552                 }
6553                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6554                         tw32(MAC_SERDES_CFG, 0x616000);
6555         }
6556
6557         /* Prevent chip from dropping frames when flow control
6558          * is enabled.
6559          */
6560         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6561
6562         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6563             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6564                 /* Use hardware link auto-negotiation */
6565                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6566         }
6567
6568         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6569             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6570                 u32 tmp;
6571
6572                 tmp = tr32(SERDES_RX_CTRL);
6573                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6574                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6575                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6576                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6577         }
6578
6579         err = tg3_setup_phy(tp, 0);
6580         if (err)
6581                 return err;
6582
6583         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6584             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6585                 u32 tmp;
6586
6587                 /* Clear CRC stats. */
6588                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6589                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6590                         tg3_readphy(tp, 0x14, &tmp);
6591                 }
6592         }
6593
6594         __tg3_set_rx_mode(tp->dev);
6595
6596         /* Initialize receive rules. */
6597         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6598         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6599         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6600         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6601
6602         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6603             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6604                 limit = 8;
6605         else
6606                 limit = 16;
6607         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6608                 limit -= 4;
6609         switch (limit) {
6610         case 16:
6611                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6612         case 15:
6613                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6614         case 14:
6615                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6616         case 13:
6617                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6618         case 12:
6619                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6620         case 11:
6621                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6622         case 10:
6623                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6624         case 9:
6625                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6626         case 8:
6627                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6628         case 7:
6629                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6630         case 6:
6631                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6632         case 5:
6633                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6634         case 4:
6635                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6636         case 3:
6637                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6638         case 2:
6639         case 1:
6640
6641         default:
6642                 break;
6643         };
6644
6645         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6646
6647         return 0;
6648 }
6649
6650 /* Called at device open time to get the chip ready for
6651  * packet processing.  Invoked with tp->lock held.
6652  */
6653 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6654 {
6655         int err;
6656
6657         /* Force the chip into D0. */
6658         err = tg3_set_power_state(tp, PCI_D0);
6659         if (err)
6660                 goto out;
6661
6662         tg3_switch_clocks(tp);
6663
6664         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6665
6666         err = tg3_reset_hw(tp, reset_phy);
6667
6668 out:
6669         return err;
6670 }
6671
/* Add the 32-bit hardware statistics register REG into the 64-bit
 * software counter PSTAT, kept as separate ->low/->high words.  If the
 * addition wraps the low word (low ends up smaller than the value just
 * added), carry one into the high word.  REG is read exactly once.
 * NOTE(review): assumes the hardware register yields a delta per read
 * (clear-on-read behavior) -- confirm against chip documentation.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
6678
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement statistics
 * registers into the 64-bit counters in tp->hw_stats.  Called from
 * tg3_timer() once per second on 5705-plus chips (see the statistics
 * block comment in tg3_reset_hw and the caller in tg3_timer).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        /* Skip the register reads entirely while the link is down. */
        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit-side MAC counters. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive-side MAC counters. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placement counters. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6719
/* Driver heartbeat timer, re-armed at the bottom to fire every
 * tp->timer_offset jiffies.  Three jobs: work around the race-prone
 * non-tagged IRQ status protocol by re-kicking lost interrupts,
 * once-per-second statistics/link polling, and the ASF "driver alive"
 * heartbeat (every 2 seconds, per the comment below).
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Interrupts are being synchronized (e.g. around a reset);
         * do no work this tick, just re-arm the timer.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block updated but possibly no IRQ seen:
                         * force an interrupt via GRC local control.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Otherwise nudge the coalescing engine to
                         * produce a status block update now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                /* Write DMA engine dropped its enable bit: the chip is
                 * wedged.  Hand off to the reset task and bail out
                 * without re-arming (reset path restarts the timer via
                 * TG3_FLG2_RESTART_TIMER).
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        /* Poll MAC_STATUS for link changes instead of
                         * relying on link-change interrupts.
                         */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Carrier up but link state changed, or carrier
                         * down with sync/signal present: renegotiate.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                /* Bounce the MAC port mode to reset the
                                 * link before renegotiating.
                                 */
                                if (!tp->serdes_counter) {
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        val = tr32(GRC_RX_CPU_EVENT);
                        /* NOTE(review): bit 14 appears to signal the RX
                         * CPU that a firmware mailbox command is pending
                         * -- confirm bit semantics against chip docs.
                         */
                        val |= (1 << 14);
                        tw32(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
6839
6840 static int tg3_request_irq(struct tg3 *tp)
6841 {
6842         irq_handler_t fn;
6843         unsigned long flags;
6844         struct net_device *dev = tp->dev;
6845
6846         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6847                 fn = tg3_msi;
6848                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6849                         fn = tg3_msi_1shot;
6850                 flags = IRQF_SAMPLE_RANDOM;
6851         } else {
6852                 fn = tg3_interrupt;
6853                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6854                         fn = tg3_interrupt_tagged;
6855                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6856         }
6857         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6858 }
6859
/* Check that the chip can actually deliver an interrupt.  The normal
 * handler is temporarily replaced with tg3_test_isr, an interrupt is
 * forced via HOSTCC_MODE_NOW, and we poll for up to ~50ms (5 x 10ms)
 * for evidence it fired: a non-zero interrupt mailbox, or the
 * mask-PCI-interrupt bit in MISC_HOST_CTRL (presumably set by the
 * test ISR -- confirm against tg3_test_isr).  Returns 0 on success,
 * -EIO if no interrupt was observed, or a request_irq() error.
 *
 * NOTE(review): if either request_irq() call fails, the function
 * returns with no handler installed and chip interrupts disabled;
 * callers must treat any non-zero return as fatal for the device.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Swap in the test ISR in place of the normal handler. */
        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force the coalescing engine to generate an interrupt now. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Restore the normal handler regardless of the outcome. */
        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (intr_ok)
                return 0;

        return -EIO;
}
6913
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error is returned as-is.  On MSI
 * failure the device is re-initialized in INTx mode because the failed
 * MSI cycle may have left the chip in a bad state (Master Abort).
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word (re-enables SERR). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        /* Re-request the irq; tg3_request_irq() now picks an INTx
         * handler since TG3_FLG2_USING_MSI is clear.
         */
        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
6974
/* Bring the interface up.  Powers the chip to D0, allocates the
 * descriptor rings, enables MSI where the chip supports it, programs
 * the hardware, verifies MSI delivery actually works, starts the
 * heartbeat timer, and enables interrupts.  Returns 0 on success or a
 * negative errno, with all acquired resources released on failure.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        tg3_full_lock(tp, 0);

        /* Force the chip into D0 before touching any other state. */
        err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                tg3_full_unlock(tp);
                return err;
        }

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        /* Try MSI only on 5750-plus chips, excluding 5750 AX/BX
         * steppings and 5714 devices whose pdev_peer is themselves
         * (NOTE(review): presumably single-port parts -- confirm).
         */
        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
            (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
            (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
            !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
              (tp->pdev_peer == tp->pdev))) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* Undo MSI enable and ring allocation on irq failure. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Timer tick: 1s with tagged status, 100ms otherwise.
                 * timer_counter divides ticks down to once per second;
                 * asf_counter to once per two seconds (see tg3_timer).
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* Verify MSI delivery end-to-end; tg3_test_msi() falls
                 * back to INTx itself, or returns an error if even that
                 * cannot be restored.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        return err;
                }

                /* MSI survived the test; enable one-shot mode if the
                 * chip supports it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_full_lock(tp, 0);

        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
7107
#if 0
/* Debug-only helper (compiled out): dump the state of every major
 * hardware block, the NIC-side ring control blocks in SRAM, the host
 * status/statistics blocks and the first few TX/RX descriptors to the
 * kernel log.  Caller must have the chip quiet enough that raw
 * register reads are safe.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* Ring control blocks living in NIC SRAM (via memory window). */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	/* NOTE(review): the tp->regs + offset arithmetic below treats
	 * tp->regs as an integer base; this dead (#if 0) code has not
	 * been kept in sync with the live driver -- verify before
	 * resurrecting.
	 */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7335
/* Forward declarations: tg3_close() below snapshots both the netdev
 * and ethtool statistics before the hardware stats block is freed.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7338
/* net_device close hook: stop the TX queue and timer, halt the chip,
 * free the rings/IRQ/MSI and DMA memory, snapshot the statistics so
 * they survive the teardown, then drop the device into D3hot.
 * The ordering below is deliberate; do not reorder.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	/* Timer must be dead before we take the full lock below. */
	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	/* IRQ teardown happens outside the full lock. */
	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve the cumulative counters across the down/up cycle;
	 * tg3_free_consistent() is about to free the hw stats block.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7388
7389 static inline unsigned long get_stat64(tg3_stat64_t *val)
7390 {
7391         unsigned long ret;
7392
7393 #if (BITS_PER_LONG == 32)
7394         ret = val->low;
7395 #else
7396         ret = ((u64)val->high << 32) | ((u64)val->low);
7397 #endif
7398         return ret;
7399 }
7400
/* Return the cumulative RX CRC error count.  On 5700/5701 copper
 * devices the count is fetched from PHY registers 0x1e/0x14
 * (presumably a PHY-internal error counter -- NOTE(review): confirm
 * against Broadcom PHY docs) and accumulated in tp->phy_crc_errors.
 * All other chips use the hardware rx_fcs_errors statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			/* Set bit 15 of reg 0x1e, then read the counter
			 * out of reg 0x14.
			 * NOTE(review): if the 0x14 read fails, val still
			 * holds the reg-0x1e value and is added below.
			 */
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7425
/* Accumulate one ethtool statistic: the snapshot saved at the last
 * close (old_estats) plus the live hardware counter.  Relies on
 * estats/old_estats/hw_stats locals in the enclosing function.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
7429
/* Fill tp->estats with cumulative ethtool statistics: the snapshot
 * taken at the last close (tp->estats_prev) plus the current hardware
 * counters.  When the stats block is not mapped (device down), return
 * the saved snapshot unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7517
/* net_device get_stats hook: each field is the value snapshotted at
 * the last close (tp->net_stats_prev) plus the live hardware counter,
 * so counters survive down/up cycles.  When the stats block is not
 * mapped (device down), return the snapshot unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the uni/multi/broadcast counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper parts;
	 * calc_crc_errors() hides that distinction.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7577
/* Compute the standard reflected CRC-32 (polynomial 0xedb88320,
 * initial value ~0, final inversion) over len bytes of buf.  Used to
 * derive the MAC's multicast hash filter bits.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = ~(u32) 0;
	int byte, bit;

	for (byte = 0; byte < len; byte++) {
		crc ^= buf[byte];

		/* Shift out one bit at a time, XORing in the polynomial
		 * whenever the bit shifted out was set.
		 */
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xedb88320 & (0 - (crc & 1)));
	}

	return ~crc;
}
7602
7603 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7604 {
7605         /* accept or reject all multicast frames */
7606         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7607         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7608         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7609         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7610 }
7611
/* Program the MAC RX filters (promiscuous, VLAN tag stripping, and the
 * multicast hash) from dev->flags and dev->mc_list.  Caller must hold
 * tg3_full_lock().
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s): for each address, the
		 * low 7 bits of the inverted CRC-32 select one bit in the
		 * 4 x 32-bit hash registers (bits 5-6 pick the register,
		 * bits 0-4 the bit within it).
		 */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the hardware when the mode word actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7675
/* net_device set_rx_mode hook: take the full lock and reprogram the
 * RX filters.  A downed interface has nothing to program; tg3_open()
 * establishes the RX mode when the hardware comes back up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7687
/* Size in bytes of the register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool hook: report the fixed length of the register dump buffer. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7694
/* ethtool hook: dump chip registers into the 32K buffer at _p.  Each
 * register value is stored at its own register offset within the
 * buffer (the GET_REG32_* macros reposition 'p' to the base offset in
 * orig_p before reading), so ranges that are not read stay zero.
 * Skipped entirely while the PHY is powered down, since register
 * reads would be unsafe.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump and advance the cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump 'len' bytes of register space starting at 'base'. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist when the flag says so. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7767
7768 static int tg3_get_eeprom_len(struct net_device *dev)
7769 {
7770         struct tg3 *tp = netdev_priv(dev);
7771
7772         return tp->nvram_size;
7773 }
7774
/* NVRAM word accessors, defined later in this file. */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7777
/* ethtool hook: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into 'data'.  NVRAM is word (4 byte) addressed, so
 * the transfer is split into an unaligned head, whole words, and an
 * unaligned tail; each word is converted to little-endian byte order
 * before the byte-level copy.  Returns 0 or a negative errno, with
 * eeprom->len updated to the number of bytes actually copied.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied so far, then fail. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7839
/* NVRAM writer, defined later in this file; used by tg3_set_eeprom(). */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7841
7842 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7843 {
7844         struct tg3 *tp = netdev_priv(dev);
7845         int ret;
7846         u32 offset, len, b_offset, odd_len, start, end;
7847         u8 *buf;
7848
7849         if (tp->link_config.phy_is_low_power)
7850                 return -EAGAIN;
7851
7852         if (eeprom->magic != TG3_EEPROM_MAGIC)
7853                 return -EINVAL;
7854
7855         offset = eeprom->offset;
7856         len = eeprom->len;
7857
7858         if ((b_offset = (offset & 3))) {
7859                 /* adjustments to start on required 4 byte boundary */
7860                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7861                 if (ret)
7862                         return ret;
7863                 start = cpu_to_le32(start);
7864                 len += b_offset;
7865                 offset &= ~3;
7866                 if (len < 4)
7867                         len = 4;
7868         }
7869
7870         odd_len = 0;
7871         if (len & 3) {
7872                 /* adjustments to end on required 4 byte boundary */
7873                 odd_len = 1;
7874                 len = (len + 3) & ~3;
7875                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7876                 if (ret)
7877                         return ret;
7878                 end = cpu_to_le32(end);
7879         }
7880
7881         buf = data;
7882         if (b_offset || odd_len) {
7883                 buf = kmalloc(len, GFP_KERNEL);
7884                 if (buf == 0)
7885                         return -ENOMEM;
7886                 if (b_offset)
7887                         memcpy(buf, &start, 4);
7888                 if (odd_len)
7889                         memcpy(buf+len-4, &end, 4);
7890                 memcpy(buf + b_offset, data, eeprom->len);
7891         }
7892
7893         ret = tg3_nvram_write_block(tp, offset, len, buf);
7894
7895         if (buf != data)
7896                 kfree(buf);
7897
7898         return ret;
7899 }
7900
/* ethtool hook: report link capabilities and current settings based on
 * the chip flags (10/100-only, serdes vs copper) and link_config.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* 10/100-only chips cannot advertise gigabit rates. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHY gets the 10/100 + MII modes; serdes is fibre only. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Active speed/duplex are only meaningful while the device is up. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
7935
7936 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7937 {
7938         struct tg3 *tp = netdev_priv(dev);
7939
7940         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7941                 /* These are the only valid advertisement bits allowed.  */
7942                 if (cmd->autoneg == AUTONEG_ENABLE &&
7943                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7944                                           ADVERTISED_1000baseT_Full |
7945                                           ADVERTISED_Autoneg |
7946                                           ADVERTISED_FIBRE)))
7947                         return -EINVAL;
7948                 /* Fiber can only do SPEED_1000.  */
7949                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7950                          (cmd->speed != SPEED_1000))
7951                         return -EINVAL;
7952         /* Copper cannot force SPEED_1000.  */
7953         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7954                    (cmd->speed == SPEED_1000))
7955                 return -EINVAL;
7956         else if ((cmd->speed == SPEED_1000) &&
7957                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7958                 return -EINVAL;
7959
7960         tg3_full_lock(tp, 0);
7961
7962         tp->link_config.autoneg = cmd->autoneg;
7963         if (cmd->autoneg == AUTONEG_ENABLE) {
7964                 tp->link_config.advertising = cmd->advertising;
7965                 tp->link_config.speed = SPEED_INVALID;
7966                 tp->link_config.duplex = DUPLEX_INVALID;
7967         } else {
7968                 tp->link_config.advertising = 0;
7969                 tp->link_config.speed = cmd->speed;
7970                 tp->link_config.duplex = cmd->duplex;
7971         }
7972
7973         if (netif_running(dev))
7974                 tg3_setup_phy(tp, 1);
7975
7976         tg3_full_unlock(tp);
7977
7978         return 0;
7979 }
7980
/* ethtool get_drvinfo handler: report driver name, version, firmware
 * version and PCI bus address.
 * NOTE(review): plain strcpy assumes every source string fits its
 * ethtool_drvinfo field — true for the fixed module strings, but worth
 * confirming for tp->fw_ver.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
7990
7991 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7992 {
7993         struct tg3 *tp = netdev_priv(dev);
7994
7995         wol->supported = WAKE_MAGIC;
7996         wol->wolopts = 0;
7997         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7998                 wol->wolopts = WAKE_MAGIC;
7999         memset(&wol->sopass, 0, sizeof(wol->sopass));
8000 }
8001
8002 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8003 {
8004         struct tg3 *tp = netdev_priv(dev);
8005
8006         if (wol->wolopts & ~WAKE_MAGIC)
8007                 return -EINVAL;
8008         if ((wol->wolopts & WAKE_MAGIC) &&
8009             tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
8010             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
8011                 return -EINVAL;
8012
8013         spin_lock_bh(&tp->lock);
8014         if (wol->wolopts & WAKE_MAGIC)
8015                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8016         else
8017                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8018         spin_unlock_bh(&tp->lock);
8019
8020         return 0;
8021 }
8022
8023 static u32 tg3_get_msglevel(struct net_device *dev)
8024 {
8025         struct tg3 *tp = netdev_priv(dev);
8026         return tp->msg_enable;
8027 }
8028
8029 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8030 {
8031         struct tg3 *tp = netdev_priv(dev);
8032         tp->msg_enable = value;
8033 }
8034
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso handler: toggle TCP segmentation offload.
 * Returns 0 on success, -EINVAL if the chip cannot do TSO at all.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		/* Hardware cannot do TSO: only "off" is acceptable. */
		if (value)
			return -EINVAL;
		return 0;
	}
	/* On HW_TSO_2 parts (except the 5906), IPv6 TSO follows the
	 * requested IPv4 TSO setting.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
8055
/* ethtool nway_reset handler: restart link autonegotiation.
 * Returns 0 on success, -EAGAIN if the device is down, -EINVAL if
 * autonegotiation does not apply or is not currently enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* No MII autonegotiation on SERDES PHYs. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is deliberately read twice — the first
	 * result is discarded, presumably to flush a stale value.
	 * Confirm before "simplifying" this away.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a fresh autonegotiation cycle. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8082
8083 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8084 {
8085         struct tg3 *tp = netdev_priv(dev);
8086
8087         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8088         ering->rx_mini_max_pending = 0;
8089         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8090                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8091         else
8092                 ering->rx_jumbo_max_pending = 0;
8093
8094         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8095
8096         ering->rx_pending = tp->rx_pending;
8097         ering->rx_mini_pending = 0;
8098         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8099                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8100         else
8101                 ering->rx_jumbo_pending = 0;
8102
8103         ering->tx_pending = tp->tx_pending;
8104 }
8105
/* ethtool set_ringparam handler: resize the RX/TX rings.  When the
 * device is running it is halted and re-initialized, briefly
 * interrupting traffic.  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject sizes beyond the hardware rings, and TX rings too small
	 * to hold one maximally fragmented skb (three worth on chips
	 * flagged with the HW TSO bug).
	 */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot use more than 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Apply the new sizes via a full chip reset/restart. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8145
8146 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8147 {
8148         struct tg3 *tp = netdev_priv(dev);
8149
8150         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8151         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8152         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8153 }
8154
8155 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8156 {
8157         struct tg3 *tp = netdev_priv(dev);
8158         int irq_sync = 0, err = 0;
8159
8160         if (netif_running(dev)) {
8161                 tg3_netif_stop(tp);
8162                 irq_sync = 1;
8163         }
8164
8165         tg3_full_lock(tp, irq_sync);
8166
8167         if (epause->autoneg)
8168                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8169         else
8170                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8171         if (epause->rx_pause)
8172                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8173         else
8174                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8175         if (epause->tx_pause)
8176                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8177         else
8178                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8179
8180         if (netif_running(dev)) {
8181                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8182                 err = tg3_restart_hw(tp, 1);
8183                 if (!err)
8184                         tg3_netif_start(tp);
8185         }
8186
8187         tg3_full_unlock(tp);
8188
8189         return err;
8190 }
8191
8192 static u32 tg3_get_rx_csum(struct net_device *dev)
8193 {
8194         struct tg3 *tp = netdev_priv(dev);
8195         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8196 }
8197
8198 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8199 {
8200         struct tg3 *tp = netdev_priv(dev);
8201
8202         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8203                 if (data != 0)
8204                         return -EINVAL;
8205                 return 0;
8206         }
8207
8208         spin_lock_bh(&tp->lock);
8209         if (data)
8210                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8211         else
8212                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8213         spin_unlock_bh(&tp->lock);
8214
8215         return 0;
8216 }
8217
8218 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8219 {
8220         struct tg3 *tp = netdev_priv(dev);
8221
8222         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8223                 if (data != 0)
8224                         return -EINVAL;
8225                 return 0;
8226         }
8227
8228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8229             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8230                 ethtool_op_set_tx_hw_csum(dev, data);
8231         else
8232                 ethtool_op_set_tx_csum(dev, data);
8233
8234         return 0;
8235 }
8236
/* Number of u64 entries reported by tg3_get_ethtool_stats. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
8241
/* Number of entries in the ethtool self-test result array. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
8246
/* ethtool get_strings handler: copy the name table for the requested
 * string set (statistics keys or self-test names) into buf.
 */
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
8261
8262 static int tg3_phys_id(struct net_device *dev, u32 data)
8263 {
8264         struct tg3 *tp = netdev_priv(dev);
8265         int i;
8266
8267         if (!netif_running(tp->dev))
8268                 return -EAGAIN;
8269
8270         if (data == 0)
8271                 data = 2;
8272
8273         for (i = 0; i < (data * 2); i++) {
8274                 if ((i % 2) == 0)
8275                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8276                                            LED_CTRL_1000MBPS_ON |
8277                                            LED_CTRL_100MBPS_ON |
8278                                            LED_CTRL_10MBPS_ON |
8279                                            LED_CTRL_TRAFFIC_OVERRIDE |
8280                                            LED_CTRL_TRAFFIC_BLINK |
8281                                            LED_CTRL_TRAFFIC_LED);
8282
8283                 else
8284                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8285                                            LED_CTRL_TRAFFIC_OVERRIDE);
8286
8287                 if (msleep_interruptible(500))
8288                         break;
8289         }
8290         tw32(MAC_LED_CTRL, tp->led_ctrl);
8291         return 0;
8292 }
8293
/* ethtool get_ethtool_stats handler: copy the driver's accumulated
 * statistics block into the caller-supplied u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8300
8301 #define NVRAM_TEST_SIZE 0x100
8302 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8303 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8304 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8305
/* Self-test: verify the NVRAM image checksum(s).  The expected layout
 * is selected by the magic word at offset 0.  Returns 0 on success or
 * a negative errno.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Decide how many bytes to read based on the image type. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into buf, stored as little-endian words. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Simple byte-wise checksum: the sum must be zero. */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;
		int j, k;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 more parity bits ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... and byte 17 the final 8. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Check each data byte against its parity bit: the bit
		 * must be set exactly when the byte's popcount is even
		 * (odd overall parity).
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8419
8420 #define TG3_SERDES_TIMEOUT_SEC  2
8421 #define TG3_COPPER_TIMEOUT_SEC  6
8422
8423 static int tg3_test_link(struct tg3 *tp)
8424 {
8425         int i, max;
8426
8427         if (!netif_running(tp->dev))
8428                 return -ENODEV;
8429
8430         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8431                 max = TG3_SERDES_TIMEOUT_SEC;
8432         else
8433                 max = TG3_COPPER_TIMEOUT_SEC;
8434
8435         for (i = 0; i < max; i++) {
8436                 if (netif_carrier_ok(tp->dev))
8437                         return 0;
8438
8439                 if (msleep_interruptible(1000))
8440                         break;
8441         }
8442
8443         return -EIO;
8444 }
8445
/* Only test the commonly used registers */
/* Self-test: for each register in reg_tbl that applies to this chip,
 * verify that read-only bits keep their value and read/write bits
 * accept both all-zeros and all-ones.  The original register content
 * is saved and restored around each probe.  Returns 0 or -EIO.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8664
8665 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8666 {
8667         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8668         int i;
8669         u32 j;
8670
8671         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8672                 for (j = 0; j < len; j += 4) {
8673                         u32 val;
8674
8675                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8676                         tg3_read_mem(tp, offset + j, &val);
8677                         if (val != test_pattern[i])
8678                                 return -EIO;
8679                 }
8680         }
8681         return 0;
8682 }
8683
/* Self-test: pattern-test the chip's internal SRAM regions.  The table
 * of regions depends on the ASIC generation.  Returns 0 on success or
 * the first failing region's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Each entry is an internal-memory offset plus a byte length;
	 * an offset of 0xffffffff terminates the table.
	 */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table that matches this chip family. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	/* Test each region, stopping at the first failure. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
8739
8740 #define TG3_MAC_LOOPBACK        0
8741 #define TG3_PHY_LOOPBACK        1
8742
/* Transmit a single self-addressed test frame through the requested
 * loopback path (internal MAC loopback or PHY loopback) and verify that
 * it arrives intact on the standard RX ring.
 *
 * Returns 0 on success, -EIO if the frame is lost or corrupted,
 * -ENOMEM if the skb allocation fails, -EINVAL for an unknown mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				/* Open the EPHY shadow-register window and
				 * clear two test-mode bits before entering
				 * loopback, then restore the test register.
				 */
				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				if (!tg3_readphy(tp, 0x10, &phy))
					tg3_writephy(tp, 0x10, phy & ~0x4000);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			/* 5906 is a 10/100-only device. */
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a max-size (non-jumbo) frame: our own MAC as destination,
	 * zeroed header padding, then an incrementing byte pattern that is
	 * checked on reception.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Snapshot the RX producer index so we can detect our looped frame. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the TX doorbell; the read-back flushes the posted write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame must have been both sent and received back. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length includes the 4-byte FCS; strip it for comparison. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8906
8907 #define TG3_MAC_LOOPBACK_FAILED         1
8908 #define TG3_PHY_LOOPBACK_FAILED         2
8909 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8910                                          TG3_PHY_LOOPBACK_FAILED)
8911
8912 static int tg3_test_loopback(struct tg3 *tp)
8913 {
8914         int err = 0;
8915
8916         if (!netif_running(tp->dev))
8917                 return TG3_LOOPBACK_FAILED;
8918
8919         err = tg3_reset_hw(tp, 1);
8920         if (err)
8921                 return TG3_LOOPBACK_FAILED;
8922
8923         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8924                 err |= TG3_MAC_LOOPBACK_FAILED;
8925         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8926                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8927                         err |= TG3_PHY_LOOPBACK_FAILED;
8928         }
8929
8930         return err;
8931 }
8932
/* ethtool self-test entry point.  Always runs the NVRAM and link tests;
 * when ETH_TEST_FL_OFFLINE is requested, additionally halts the chip and
 * runs the register, memory, loopback and interrupt tests, then restarts
 * the hardware.  Per-test pass/fail is reported through data[0..5]
 * (1 = failed; data[4] carries the loopback failure bitmask).
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip for the duration of the tests if it was powered down. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it and stop the on-chip CPUs
		 * (pre-5705 chips have a separate TX CPU as well).
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs the locks dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset once more, then bring the interface back up. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Restore the low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9005
9006 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9007 {
9008         struct mii_ioctl_data *data = if_mii(ifr);
9009         struct tg3 *tp = netdev_priv(dev);
9010         int err;
9011
9012         switch(cmd) {
9013         case SIOCGMIIPHY:
9014                 data->phy_id = PHY_ADDR;
9015
9016                 /* fallthru */
9017         case SIOCGMIIREG: {
9018                 u32 mii_regval;
9019
9020                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9021                         break;                  /* We have no PHY */
9022
9023                 if (tp->link_config.phy_is_low_power)
9024                         return -EAGAIN;
9025
9026                 spin_lock_bh(&tp->lock);
9027                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9028                 spin_unlock_bh(&tp->lock);
9029
9030                 data->val_out = mii_regval;
9031
9032                 return err;
9033         }
9034
9035         case SIOCSMIIREG:
9036                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9037                         break;                  /* We have no PHY */
9038
9039                 if (!capable(CAP_NET_ADMIN))
9040                         return -EPERM;
9041
9042                 if (tp->link_config.phy_is_low_power)
9043                         return -EAGAIN;
9044
9045                 spin_lock_bh(&tp->lock);
9046                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9047                 spin_unlock_bh(&tp->lock);
9048
9049                 return err;
9050
9051         default:
9052                 /* do nothing */
9053                 break;
9054         }
9055         return -EOPNOTSUPP;
9056 }
9057
9058 #if TG3_VLAN_TAG_USED
9059 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9060 {
9061         struct tg3 *tp = netdev_priv(dev);
9062
9063         if (netif_running(dev))
9064                 tg3_netif_stop(tp);
9065
9066         tg3_full_lock(tp, 0);
9067
9068         tp->vlgrp = grp;
9069
9070         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9071         __tg3_set_rx_mode(dev);
9072
9073         tg3_full_unlock(tp);
9074
9075         if (netif_running(dev))
9076                 tg3_netif_start(tp);
9077 }
9078
9079 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
9080 {
9081         struct tg3 *tp = netdev_priv(dev);
9082
9083         if (netif_running(dev))
9084                 tg3_netif_stop(tp);
9085
9086         tg3_full_lock(tp, 0);
9087         if (tp->vlgrp)
9088                 tp->vlgrp->vlan_devices[vid] = NULL;
9089         tg3_full_unlock(tp);
9090
9091         if (netif_running(dev))
9092                 tg3_netif_start(tp);
9093 }
9094 #endif
9095
9096 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9097 {
9098         struct tg3 *tp = netdev_priv(dev);
9099
9100         memcpy(ec, &tp->coal, sizeof(*ec));
9101         return 0;
9102 }
9103
9104 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9105 {
9106         struct tg3 *tp = netdev_priv(dev);
9107         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9108         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9109
9110         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9111                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9112                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9113                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9114                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9115         }
9116
9117         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9118             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9119             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9120             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9121             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9122             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9123             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9124             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9125             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9126             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9127                 return -EINVAL;
9128
9129         /* No rx interrupts will be generated if both are zero */
9130         if ((ec->rx_coalesce_usecs == 0) &&
9131             (ec->rx_max_coalesced_frames == 0))
9132                 return -EINVAL;
9133
9134         /* No tx interrupts will be generated if both are zero */
9135         if ((ec->tx_coalesce_usecs == 0) &&
9136             (ec->tx_max_coalesced_frames == 0))
9137                 return -EINVAL;
9138
9139         /* Only copy relevant parameters, ignore all others. */
9140         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9141         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9142         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9143         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9144         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9145         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9146         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9147         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9148         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9149
9150         if (netif_running(dev)) {
9151                 tg3_full_lock(tp, 0);
9152                 __tg3_set_coalesce(tp, &tp->coal);
9153                 tg3_full_unlock(tp);
9154         }
9155         return 0;
9156 }
9157
/* ethtool operation dispatch table for tg3 devices.  TSO entries are
 * present only when the kernel was built with NETIF_F_TSO support.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
9197
9198 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9199 {
9200         u32 cursize, val, magic;
9201
9202         tp->nvram_size = EEPROM_CHIP_SIZE;
9203
9204         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9205                 return;
9206
9207         if ((magic != TG3_EEPROM_MAGIC) &&
9208             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9209             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9210                 return;
9211
9212         /*
9213          * Size the chip by reading offsets at increasing powers of two.
9214          * When we encounter our validation signature, we know the addressing
9215          * has wrapped around, and thus have our chip size.
9216          */
9217         cursize = 0x10;
9218
9219         while (cursize < tp->nvram_size) {
9220                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9221                         return;
9222
9223                 if (val == magic)
9224                         break;
9225
9226                 cursize <<= 1;
9227         }
9228
9229         tp->nvram_size = cursize;
9230 }
9231
9232 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9233 {
9234         u32 val;
9235
9236         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9237                 return;
9238
9239         /* Selfboot format */
9240         if (val != TG3_EEPROM_MAGIC) {
9241                 tg3_get_eeprom_size(tp);
9242                 return;
9243         }
9244
9245         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9246                 if (val != 0) {
9247                         tp->nvram_size = (val >> 16) * 1024;
9248                         return;
9249                 }
9250         }
9251         tp->nvram_size = 0x20000;
9252 }
9253
/* Detect the NVRAM device behind the legacy (pre-5752) interface from
 * NVRAM_CFG1: records the JEDEC vendor, page size, and whether the part
 * is flash and/or buffered.  The full vendor decode applies only to
 * 5750/5780-class chips; everything else defaults to a buffered Atmel
 * AT45DB part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: drop compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9306
/* Detect the NVRAM device on 5752-class chips from NVRAM_CFG1: records
 * the JEDEC vendor, buffered/flash flags, TPM write protection, and the
 * page size (decoded from CFG1 for flash parts, or the maximum EEPROM
 * size for EEPROM parts).
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash part: page size is encoded in CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9367
/* Detect the NVRAM device on 5755-class chips from NVRAM_CFG1: records
 * the JEDEC vendor, buffered/flash flags, TPM write protection, and the
 * page size for each supported part.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM part: drop compatibility bypass mode. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9408
/* Detect the NVRAM device on 5787-class chips from NVRAM_CFG1: records
 * the JEDEC vendor, buffered/flash flags, and the page size for each
 * supported part.  Unlike 5752/5755 there is no TPM protection bit check.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM part: drop compatibility bypass mode. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9446
9447 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9448 {
9449         tp->nvram_jedecnum = JEDEC_ATMEL;
9450         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9451         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9452 }
9453
9454 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9455 static void __devinit tg3_nvram_init(struct tg3 *tp)
9456 {
9457         int j;
9458
9459         tw32_f(GRC_EEPROM_ADDR,
9460              (EEPROM_ADDR_FSM_RESET |
9461               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9462                EEPROM_ADDR_CLKPERD_SHIFT)));
9463
9464         /* XXX schedule_timeout() ... */
9465         for (j = 0; j < 100; j++)
9466                 udelay(10);
9467
9468         /* Enable seeprom accesses. */
9469         tw32_f(GRC_LOCAL_CTRL,
9470              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9471         udelay(100);
9472
9473         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9474             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9475                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9476
9477                 if (tg3_nvram_lock(tp)) {
9478                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9479                                "tg3_nvram_init failed.\n", tp->dev->name);
9480                         return;
9481                 }
9482                 tg3_enable_nvram_access(tp);
9483
9484                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9485                         tg3_get_5752_nvram_info(tp);
9486                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9487                         tg3_get_5755_nvram_info(tp);
9488                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9489                         tg3_get_5787_nvram_info(tp);
9490                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9491                         tg3_get_5906_nvram_info(tp);
9492                 else
9493                         tg3_get_nvram_info(tp);
9494
9495                 tg3_get_nvram_size(tp);
9496
9497                 tg3_disable_nvram_access(tp);
9498                 tg3_nvram_unlock(tp);
9499
9500         } else {
9501                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9502
9503                 tg3_get_eeprom_size(tp);
9504         }
9505 }
9506
/* Read one 32-bit word from the serial EEPROM via the GRC_EEPROM_ADDR
 * state machine (used on chips without an NVRAM interface).
 * @offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 on success, -EINVAL on a bad offset, or -EBUSY if the
 * transaction never completes.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated register bits; clear the address, device
	 * id, and READ fields before programming the new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion: up to 10000 * 100us = 1 second. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9540
9541 #define NVRAM_CMD_TIMEOUT 10000
9542
9543 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9544 {
9545         int i;
9546
9547         tw32(NVRAM_CMD, nvram_cmd);
9548         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9549                 udelay(10);
9550                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9551                         udelay(10);
9552                         break;
9553                 }
9554         }
9555         if (i == NVRAM_CMD_TIMEOUT) {
9556                 return -EBUSY;
9557         }
9558         return 0;
9559 }
9560
9561 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9562 {
9563         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9564             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9565             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9566             (tp->nvram_jedecnum == JEDEC_ATMEL))
9567
9568                 addr = ((addr / tp->nvram_pagesize) <<
9569                         ATMEL_AT45DB0X1B_PAGE_POS) +
9570                        (addr % tp->nvram_pagesize);
9571
9572         return addr;
9573 }
9574
9575 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9576 {
9577         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9578             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9579             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9580             (tp->nvram_jedecnum == JEDEC_ATMEL))
9581
9582                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9583                         tp->nvram_pagesize) +
9584                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9585
9586         return addr;
9587 }
9588
/* Read one 32-bit word from NVRAM at @offset (falls back to the EEPROM
 * path on chips without TG3_FLAG_NVRAM).  Takes and releases the NVRAM
 * hardware lock around the access.  Returns 0 or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Convert the linear offset to the flash part's native
	 * (possibly page-based) addressing.
	 */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Data register contents are byte-swapped relative to host order. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9620
9621 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9622 {
9623         int err;
9624         u32 tmp;
9625
9626         err = tg3_nvram_read(tp, offset, &tmp);
9627         *val = swab32(tmp);
9628         return err;
9629 }
9630
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word per GRC_EEPROM_ADDR transaction.  offset and length
 * are dword aligned.  Returns 0 on success, -EBUSY if a word write
 * never signals completion.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* EEPROM data register takes little-endian data. */
		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* Writing COMPLETE back appears to clear the stale
		 * completion status before the next transaction --
		 * confirm against the register spec.
		 */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Clear address/devid/READ fields, then start a write. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion: up to 10000 * 100us = 1 second. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9672
/* offset and length are dword aligned */
/* Write to unbuffered (page-erase) flash.  For every page touched:
 * read the whole page into a bounce buffer, merge in the caller's
 * bytes, erase the page, then program it back one word at a time.
 * Called from tg3_nvram_write_block() with the NVRAM lock already
 * held.  Returns 0 on success or a negative errno; a partial write
 * may have occurred on error.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Bounce buffer for one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the current page contents into the bounce buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Merge the new data into the page image; the first
		 * chunk may start mid-page, later chunks are page
		 * aligned.
		 */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, one word at a time;
		 * FIRST/LAST bracket the page burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			/* Data register takes big-endian data. */
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always drop write enable on the way out, success or not. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9768
/* offset and length are dword aligned */
/* Write to buffered flash or plain eeprom, one 32-bit word per NVRAM
 * command.  FIRST is set at the start of each flash page (or of the
 * whole transfer), LAST at the end of each page and at the end of the
 * buffer.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		/* Data register takes big-endian data. */
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts (outside 5752/5755/5787) need an explicit
		 * write-enable command ahead of each FIRST burst.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9819
9820 /* offset and length are dword aligned */
9821 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9822 {
9823         int ret;
9824
9825         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9826                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9827                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9828                 udelay(40);
9829         }
9830
9831         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9832                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9833         }
9834         else {
9835                 u32 grc_mode;
9836
9837                 ret = tg3_nvram_lock(tp);
9838                 if (ret)
9839                         return ret;
9840
9841                 tg3_enable_nvram_access(tp);
9842                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9843                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9844                         tw32(NVRAM_WRITE1, 0x406);
9845
9846                 grc_mode = tr32(GRC_MODE);
9847                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9848
9849                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9850                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9851
9852                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9853                                 buf);
9854                 }
9855                 else {
9856                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9857                                 buf);
9858                 }
9859
9860                 grc_mode = tr32(GRC_MODE);
9861                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9862
9863                 tg3_disable_nvram_access(tp);
9864                 tg3_nvram_unlock(tp);
9865         }
9866
9867         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9868                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9869                 udelay(40);
9870         }
9871
9872         return ret;
9873 }
9874
/* One entry of the subsystem-id -> PHY id fallback table: identifies a
 * board by its PCI subsystem vendor/device pair and supplies the PHY id
 * to assume for it.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
9879
/* Hardcoded board table consulted by lookup_by_subsys() when neither
 * the hardware PHY ID registers nor the eeprom yield a usable PHY id.
 * A phy_id of 0 causes the caller (tg3_phy_probe) to treat the board
 * as serdes/fiber.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9917
9918 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9919 {
9920         int i;
9921
9922         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9923                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9924                      tp->pdev->subsystem_vendor) &&
9925                     (subsys_id_to_phy_id[i].subsys_devid ==
9926                      tp->pdev->subsystem_device))
9927                         return &subsys_id_to_phy_id[i];
9928         }
9929         return NULL;
9930 }
9931
9932 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9933 {
9934         u32 val;
9935         u16 pmcsr;
9936
9937         /* On some early chips the SRAM cannot be accessed in D3hot state,
9938          * so need make sure we're in D0.
9939          */
9940         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9941         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9942         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9943         msleep(1);
9944
9945         /* Make sure register accesses (indirect or otherwise)
9946          * will function correctly.
9947          */
9948         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9949                                tp->misc_host_ctrl);
9950
9951         /* The memory arbiter has to be enabled in order for SRAM accesses
9952          * to succeed.  Normally on powerup the tg3 chip firmware will make
9953          * sure it is enabled, but other entities such as system netboot
9954          * code might disable it.
9955          */
9956         val = tr32(MEMARB_MODE);
9957         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9958
9959         tp->phy_id = PHY_ID_INVALID;
9960         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9961
9962         /* Assume an onboard device by default.  */
9963         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9964
9965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9966                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM))
9967                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9968                 return;
9969         }
9970
9971         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9972         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9973                 u32 nic_cfg, led_cfg;
9974                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9975                 int eeprom_phy_serdes = 0;
9976
9977                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9978                 tp->nic_sram_data_cfg = nic_cfg;
9979
9980                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9981                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9982                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9983                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9984                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9985                     (ver > 0) && (ver < 0x100))
9986                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9987
9988                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9989                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9990                         eeprom_phy_serdes = 1;
9991
9992                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9993                 if (nic_phy_id != 0) {
9994                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9995                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9996
9997                         eeprom_phy_id  = (id1 >> 16) << 10;
9998                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9999                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10000                 } else
10001                         eeprom_phy_id = 0;
10002
10003                 tp->phy_id = eeprom_phy_id;
10004                 if (eeprom_phy_serdes) {
10005                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10006                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10007                         else
10008                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10009                 }
10010
10011                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10012                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10013                                     SHASTA_EXT_LED_MODE_MASK);
10014                 else
10015                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10016
10017                 switch (led_cfg) {
10018                 default:
10019                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10020                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10021                         break;
10022
10023                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10024                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10025                         break;
10026
10027                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10028                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10029
10030                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10031                          * read on some older 5700/5701 bootcode.
10032                          */
10033                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10034                             ASIC_REV_5700 ||
10035                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10036                             ASIC_REV_5701)
10037                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10038
10039                         break;
10040
10041                 case SHASTA_EXT_LED_SHARED:
10042                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10043                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10044                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10045                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10046                                                  LED_CTRL_MODE_PHY_2);
10047                         break;
10048
10049                 case SHASTA_EXT_LED_MAC:
10050                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10051                         break;
10052
10053                 case SHASTA_EXT_LED_COMBO:
10054                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10055                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10056                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10057                                                  LED_CTRL_MODE_PHY_2);
10058                         break;
10059
10060                 };
10061
10062                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10063                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10064                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10065                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10066
10067                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
10068                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10069                 else
10070                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10071
10072                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10073                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10074                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10075                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10076                 }
10077                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
10078                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
10079
10080                 if (cfg2 & (1 << 17))
10081                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10082
10083                 /* serdes signal pre-emphasis in register 0x590 set by */
10084                 /* bootcode if bit 18 is set */
10085                 if (cfg2 & (1 << 18))
10086                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10087         }
10088 }
10089
/* Determine which PHY is attached: read the hardware PHY ID registers,
 * fall back to the id cached from eeprom by tg3_get_eeprom_hw_cfg(),
 * then to the hardcoded subsystem-id table.  For copper PHYs (without
 * ASF) this may also reset the PHY and restart autonegotiation.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two ID registers into the driver's internal
		 * PHY_ID layout (same packing as in tg3_get_eeprom_hw_cfg).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR is read twice on purpose: the first read flushes
		 * the latched link status (latched-low per the MII spec).
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		/* Advertise all 10/100 modes; add gigabit below unless
		 * the board is 10/100-only.
		 */
		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must negotiate as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): when the block above ran successfully (err == 0),
	 * this repeats the same 5401 DSP init -- looks redundant; confirm
	 * intent before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10212
/* Extract the board part number from the VPD area into
 * tp->board_part_number.  The VPD bytes are fetched either directly
 * from NVRAM (at offset 0x100, when the eeprom magic checks out) or
 * through the PCI VPD capability, then scanned for the read-only VPD
 * tag and its "PN" keyword.  Falls back to a fixed string on any
 * failure.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100; copy 256 bytes. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* Read VPD through the PCI capability: write the
		 * address, poll bit 15 for completion, read the data.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* 0x82 (identifier string) and 0x91 (read-write VPD)
		 * tags are skipped using their 16-bit little-endian
		 * length.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything but the read-only VPD tag (0x90) here means
		 * the data is not what we expect.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword list inside the read-only block
		 * looking for "PN" (part number).
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			/* Skip this keyword: 3 header bytes + payload. */
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10312
10313 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10314 {
10315         u32 val, offset, start;
10316
10317         if (tg3_nvram_read_swab(tp, 0, &val))
10318                 return;
10319
10320         if (val != TG3_EEPROM_MAGIC)
10321                 return;
10322
10323         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10324             tg3_nvram_read_swab(tp, 0x4, &start))
10325                 return;
10326
10327         offset = tg3_nvram_logical_addr(tp, offset);
10328         if (tg3_nvram_read_swab(tp, offset, &val))
10329                 return;
10330
10331         if ((val & 0xfc000000) == 0x0c000000) {
10332                 u32 ver_offset, addr;
10333                 int i;
10334
10335                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10336                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10337                         return;
10338
10339                 if (val != 0)
10340                         return;
10341
10342                 addr = offset + ver_offset - start;
10343                 for (i = 0; i < 16; i += 4) {
10344                         if (tg3_nvram_read(tp, addr + i, &val))
10345                                 return;
10346
10347                         val = cpu_to_le32(val);
10348                         memcpy(tp->fw_ver + i, &val, 4);
10349                 }
10350         }
10351 }
10352
/* Probe the chip revision and the PCI / PCI-X / PCI Express bus
 * environment, and derive every per-device invariant: workaround
 * flags in tp->tg3_flags / tg3_flags2, the register/mailbox access
 * method pointers, GPIO and coalescing defaults, and the transmit
 * entry point.  Runs once at probe time, before the device is opened.
 *
 * Returns 0 on success or a negative errno.  Note that a failed PHY
 * probe is printed but deliberately NOT returned immediately; the
 * error is carried through to the final return instead.
 */
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	/* Host bridges known to reorder posted writes to the mailbox
	 * registers; on these systems every mailbox write must be
	 * flushed with a read-back (TG3_FLAG_MBOX_WRITE_REORDER).
	 */
	static struct pci_device_id write_reorder_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_8131_BRIDGE) },
		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
			     PCI_DEVICE_ID_VIA_8385_0) },
		{ },
	};
	u32 misc_ctrl_reg;
	u32 cacheline_sz_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err, pcie_cap;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers.  It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			/* pci_get_device() drops the reference on the
			 * "from" device, so iterating with the same
			 * bridge pointer does not leak refcounts.
			 */
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				u8 rev;

				/* Skip bridge revisions newer than the
				 * affected ones listed in the table.
				 */
				pci_read_config_byte(bridge, PCI_REVISION_ID,
						     &rev);
				if (rev > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {

				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	}
	else {
		struct pci_dev *bridge = NULL;

		do {
			/* Apply the 40-bit workaround if this device sits
			 * anywhere behind a ServerWorks EPB bridge (our
			 * bus number within the bridge's bus range).
			 */
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Unpack the four byte fields of the cacheline size register. */
	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
			      &cacheline_sz_reg);

	tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
	tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
	tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
	tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;

	/* Derive the chip-generation flags: 5750-plus implies 5705-plus. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	/* Select the hardware TSO variant; the HW_TSO_1 bug flag is
	 * cleared again for 5750 C2 and later revisions.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
		} else {
			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
					  TG3_FLG2_HW_TSO_1_BUG;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
				ASIC_REV_5750 &&
			    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;

	pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap != 0) {
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u16 lnkctl;

			/* NOTE(review): HW TSO 2 is disabled on 5906 when
			 * the link partner has CLKREQ enabled — presumably
			 * a chip erratum; confirm against Broadcom docs.
			 */
			pci_read_config_word(tp->pdev,
					     pcie_cap + PCI_EXP_LNKCTL,
					     &lnkctl);
			if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
		}
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	/* Raise the 5703's PCI latency timer to at least 64 and write
	 * the repacked byte fields back.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;

		cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
		cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
		cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
		cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);

		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
				       cacheline_sz_reg);
	}

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;

		/* If this is a 5700 BX chipset, and we are in PCI-X
		 * mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
			u32 pm_reg;
			u16 pci_cmd;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;

	/* Back to back register writes can cause problems on this chip,
	 * the workaround is to read back all reg writes except those to
	 * mailbox regs.  See tg3_write_indirect_reg32().
	 *
	 * PCI Express 5750_A0 rev chips need this workaround too.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	     tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
		tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
	else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
		tp->write32 = tg3_write_flush_reg32;

	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* With the ICH workaround active all register access goes
	 * through PCI config cycles, so the MMIO mapping is unused:
	 * unmap it and turn off memory decoding entirely.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));
		return err;
	}

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
	}

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Initialize MAC MI mode, polling disabled. */
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	/* Broadcom's driver says that CIOBE multisplit has a bug */
#if 0
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
		tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
		tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
	}
#endif
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */
	}

	tg3_read_partno(tp);
	tg3_read_fw_ver(tp);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
		else
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	/* All chips before 5787 can get confused if TX buffers
	 * straddle the 4GB address boundary in some cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->dev->hard_start_xmit = tg3_start_xmit;
	else
		tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;

	/* NOTE(review): the 2-byte rx offset presumably keeps the IP
	 * header 4-byte aligned; 5701 in PCI-X mode is the exception.
	 */
	tp->rx_offset = 2;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

	tp->rx_std_max_post = TG3_RX_RING_SIZE;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	/* By default, disable wake-on-lan.  User can change this
	 * using ETHTOOL_SWOL.
	 */
	tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;

	return err;
}
10944
#ifdef CONFIG_SPARC64
/* Fetch the MAC address from the OpenFirmware "local-mac-address"
 * property attached to this PCI device, if present.
 * Returns 0 on success, -ENODEV when the property is unavailable.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
        struct net_device *netdev = tp->dev;
        struct pcidev_cookie *cookie = tp->pdev->sysdata;
        unsigned char *prop;
        int proplen;

        if (cookie == NULL)
                return -ENODEV;

        prop = of_get_property(cookie->prom_node, "local-mac-address",
                               &proplen);
        if (prop == NULL || proplen != 6)
                return -ENODEV;

        memcpy(netdev->dev_addr, prop, 6);
        memcpy(netdev->perm_addr, prop, 6);
        return 0;
}

/* Fall back to the system-wide IDPROM ethernet address. */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
        memcpy(tp->dev->dev_addr, idprom->id_ethaddr, 6);
        memcpy(tp->dev->perm_addr, idprom->id_ethaddr, 6);
        return 0;
}
#endif
10976
/* Determine the device's MAC address.  Sources are tried in order of
 * preference: OpenFirmware property (sparc64 only), the NIC SRAM
 * address mailbox, NVRAM, and finally the live MAC address registers.
 * Returns 0 on success, -EINVAL if no valid address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;

#ifdef CONFIG_SPARC64
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        /* NVRAM offset of the MAC address; 0x7c for most chips. */
        mac_offset = 0x7c;
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                /* The second MAC of a dual-MAC part lives at 0xcc. */
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* NOTE(review): on lock failure the NVRAM state machine
                 * is reset rather than unlocked -- presumably to recover
                 * a wedged arbiter; verify against tg3_nvram_lock().
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        }
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        /* The mailbox is trusted only when the high word carries the
         * 0x484b ("HK" in ASCII) signature.
         */
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
                if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
                        /* NVRAM words use the opposite byte order from
                         * the SRAM mailbox layout above.
                         */
                        dev->dev_addr[0] = ((hi >> 16) & 0xff);
                        dev->dev_addr[1] = ((hi >> 24) & 0xff);
                        dev->dev_addr[2] = ((lo >>  0) & 0xff);
                        dev->dev_addr[3] = ((lo >>  8) & 0xff);
                        dev->dev_addr[4] = ((lo >> 16) & 0xff);
                        dev->dev_addr[5] = ((lo >> 24) & 0xff);
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
                /* Last resort on sparc64: the system IDPROM address. */
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        return 0;
}
11051
11052 #define BOUNDARY_SINGLE_CACHELINE       1
11053 #define BOUNDARY_MULTI_CACHELINE        2
11054
11055 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11056 {
11057         int cacheline_size;
11058         u8 byte;
11059         int goal;
11060
11061         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11062         if (byte == 0)
11063                 cacheline_size = 1024;
11064         else
11065                 cacheline_size = (int) byte * 4;
11066
11067         /* On 5703 and later chips, the boundary bits have no
11068          * effect.
11069          */
11070         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11071             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11072             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11073                 goto out;
11074
11075 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11076         goal = BOUNDARY_MULTI_CACHELINE;
11077 #else
11078 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11079         goal = BOUNDARY_SINGLE_CACHELINE;
11080 #else
11081         goal = 0;
11082 #endif
11083 #endif
11084
11085         if (!goal)
11086                 goto out;
11087
11088         /* PCI controllers on most RISC systems tend to disconnect
11089          * when a device tries to burst across a cache-line boundary.
11090          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11091          *
11092          * Unfortunately, for PCI-E there are only limited
11093          * write-side controls for this, and thus for reads
11094          * we will still get the disconnects.  We'll also waste
11095          * these PCI cycles for both read and write for chips
11096          * other than 5700 and 5701 which do not implement the
11097          * boundary bits.
11098          */
11099         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11100             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11101                 switch (cacheline_size) {
11102                 case 16:
11103                 case 32:
11104                 case 64:
11105                 case 128:
11106                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11107                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11108                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11109                         } else {
11110                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11111                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11112                         }
11113                         break;
11114
11115                 case 256:
11116                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11117                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11118                         break;
11119
11120                 default:
11121                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11122                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11123                         break;
11124                 };
11125         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11126                 switch (cacheline_size) {
11127                 case 16:
11128                 case 32:
11129                 case 64:
11130                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11131                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11132                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11133                                 break;
11134                         }
11135                         /* fallthrough */
11136                 case 128:
11137                 default:
11138                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11139                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11140                         break;
11141                 };
11142         } else {
11143                 switch (cacheline_size) {
11144                 case 16:
11145                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11146                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11147                                         DMA_RWCTRL_WRITE_BNDRY_16);
11148                                 break;
11149                         }
11150                         /* fallthrough */
11151                 case 32:
11152                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11153                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11154                                         DMA_RWCTRL_WRITE_BNDRY_32);
11155                                 break;
11156                         }
11157                         /* fallthrough */
11158                 case 64:
11159                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11160                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11161                                         DMA_RWCTRL_WRITE_BNDRY_64);
11162                                 break;
11163                         }
11164                         /* fallthrough */
11165                 case 128:
11166                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11167                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11168                                         DMA_RWCTRL_WRITE_BNDRY_128);
11169                                 break;
11170                         }
11171                         /* fallthrough */
11172                 case 256:
11173                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11174                                 DMA_RWCTRL_WRITE_BNDRY_256);
11175                         break;
11176                 case 512:
11177                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11178                                 DMA_RWCTRL_WRITE_BNDRY_512);
11179                         break;
11180                 case 1024:
11181                 default:
11182                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11183                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11184                         break;
11185                 };
11186         }
11187
11188 out:
11189         return val;
11190 }
11191
/* Run one raw DMA transaction of @size bytes between the host buffer
 * at @buf_dma and NIC-internal buffer memory, using a hand-built
 * internal buffer descriptor placed in SRAM.  @to_device selects the
 * direction (non-zero: host memory -> NIC).  Returns 0 when the
 * completion shows up within the poll window, -ENODEV otherwise.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Quiesce the completion FIFOs and DMA engines before the test. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Descriptor points at the host buffer and a fixed NIC mbuf. */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into SRAM one word at a time through the
         * PCI memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick the transfer by enqueueing the descriptor address. */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll (up to 40 * 100us) for the descriptor to show up on the
         * matching completion FIFO.
         */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
11272
#define TEST_BUFFER_SIZE        0x2000

/* Program tp->dma_rwctrl (the TG3PCI_DMA_RW_CTRL shadow) for the
 * detected bus type, then on 5700/5701 run a DMA loopback test to
 * detect the write-DMA hardware bug, stepping the write boundary
 * down to 16 bytes if corruption is observed.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret;

        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Baseline PCI read/write command codes. */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        /* Per-bus-type watermark / workaround bits follow.  The magic
         * constants are chip-specific register fields; see the Broadcom
         * register reference for bit meanings.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |= 0x009f0000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        ret = 0;
        /* Only 5700/5701 need the loopback test below. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        while (1) {
                u32 *p = buf, i;

                /* Fill the buffer with a known ramp pattern. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        /* First corruption: retry the whole test once
                         * with a 16-byte write boundary before failing.
                         */
                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                static struct pci_device_id dma_wait_state_chipsets[] = {
                        { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
                                     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
                        { },
                };

                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                }
                else
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
11455
11456 static void __devinit tg3_init_link_config(struct tg3 *tp)
11457 {
11458         tp->link_config.advertising =
11459                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11460                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11461                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11462                  ADVERTISED_Autoneg | ADVERTISED_MII);
11463         tp->link_config.speed = SPEED_INVALID;
11464         tp->link_config.duplex = DUPLEX_INVALID;
11465         tp->link_config.autoneg = AUTONEG_ENABLE;
11466         tp->link_config.active_speed = SPEED_INVALID;
11467         tp->link_config.active_duplex = DUPLEX_INVALID;
11468         tp->link_config.phy_is_low_power = 0;
11469         tp->link_config.orig_speed = SPEED_INVALID;
11470         tp->link_config.orig_duplex = DUPLEX_INVALID;
11471         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11472 }
11473
11474 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11475 {
11476         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11477                 tp->bufmgr_config.mbuf_read_dma_low_water =
11478                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11479                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11480                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11481                 tp->bufmgr_config.mbuf_high_water =
11482                         DEFAULT_MB_HIGH_WATER_5705;
11483                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11484                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11485                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11486                         tp->bufmgr_config.mbuf_high_water =
11487                                 DEFAULT_MB_HIGH_WATER_5906;
11488                 }
11489
11490                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11491                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11492                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11493                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11494                 tp->bufmgr_config.mbuf_high_water_jumbo =
11495                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11496         } else {
11497                 tp->bufmgr_config.mbuf_read_dma_low_water =
11498                         DEFAULT_MB_RDMA_LOW_WATER;
11499                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11500                         DEFAULT_MB_MACRX_LOW_WATER;
11501                 tp->bufmgr_config.mbuf_high_water =
11502                         DEFAULT_MB_HIGH_WATER;
11503
11504                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11505                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11506                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11507                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11508                 tp->bufmgr_config.mbuf_high_water_jumbo =
11509                         DEFAULT_MB_HIGH_WATER_JUMBO;
11510         }
11511
11512         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11513         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11514 }
11515
11516 static char * __devinit tg3_phy_string(struct tg3 *tp)
11517 {
11518         switch (tp->phy_id & PHY_ID_MASK) {
11519         case PHY_ID_BCM5400:    return "5400";
11520         case PHY_ID_BCM5401:    return "5401";
11521         case PHY_ID_BCM5411:    return "5411";
11522         case PHY_ID_BCM5701:    return "5701";
11523         case PHY_ID_BCM5703:    return "5703";
11524         case PHY_ID_BCM5704:    return "5704";
11525         case PHY_ID_BCM5705:    return "5705";
11526         case PHY_ID_BCM5750:    return "5750";
11527         case PHY_ID_BCM5752:    return "5752";
11528         case PHY_ID_BCM5714:    return "5714";
11529         case PHY_ID_BCM5780:    return "5780";
11530         case PHY_ID_BCM5755:    return "5755";
11531         case PHY_ID_BCM5787:    return "5787";
11532         case PHY_ID_BCM5756:    return "5722/5756";
11533         case PHY_ID_BCM5906:    return "5906";
11534         case PHY_ID_BCM8002:    return "8002/serdes";
11535         case 0:                 return "serdes";
11536         default:                return "unknown";
11537         };
11538 }
11539
/* Format a human-readable description of the host bus (type, clock
 * and width) into @str and return it.  The caller supplies the
 * buffer; callers in this file pass at least 40 bytes.
 */
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                strcpy(str, "PCI Express");
                return str;
        } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
                /* Low 5 bits of the clock control register encode the
                 * PCI-X bus speed.
                 */
                u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

                strcpy(str, "PCIX:");

                /* 5704 CIOBE boards report 133MHz regardless of the
                 * clock_ctrl readback.  Note tr32(GRC_MISC_CFG) is only
                 * read when clock_ctrl != 7 (short-circuit ||).
                 */
                if ((clock_ctrl == 7) ||
                    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
                     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
                        strcat(str, "133MHz");
                else if (clock_ctrl == 0)
                        strcat(str, "33MHz");
                else if (clock_ctrl == 2)
                        strcat(str, "50MHz");
                else if (clock_ctrl == 4)
                        strcat(str, "66MHz");
                else if (clock_ctrl == 6)
                        strcat(str, "100MHz");
        } else {
                strcpy(str, "PCI:");
                if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
                        strcat(str, "66MHz");
                else
                        strcat(str, "33MHz");
        }
        if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
                strcat(str, ":32-bit");
        else
                strcat(str, ":64-bit");
        return str;
}
11575
/* Locate the PCI function that is the other port of a dual-port
 * device.  Returns tp->pdev itself when the device is configured
 * single-port.  The returned pointer is deliberately NOT reference
 * counted -- see the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        /* Scan all eight functions in our own slot for a device other
         * than ourselves; pci_get_slot takes a reference which is
         * dropped for every non-match (pci_dev_put is NULL-safe).
         */
        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                /* NOTE(review): if the loop falls through with the last
                 * pci_get_slot() having returned tp->pdev, that reference
                 * is dropped here AND again after the loop -- presumably
                 * benign because the core holds its own reference, but
                 * worth confirming.
                 */
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        pci_dev_put(peer);

        return peer;
}
11603
11604 static void __devinit tg3_init_coal(struct tg3 *tp)
11605 {
11606         struct ethtool_coalesce *ec = &tp->coal;
11607
11608         memset(ec, 0, sizeof(*ec));
11609         ec->cmd = ETHTOOL_GCOALESCE;
11610         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11611         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11612         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11613         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11614         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11615         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11616         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11617         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11618         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11619
11620         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11621                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11622                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11623                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11624                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11625                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11626         }
11627
11628         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11629                 ec->rx_coalesce_usecs_irq = 0;
11630                 ec->tx_coalesce_usecs_irq = 0;
11631                 ec->stats_block_coalesce_usecs = 0;
11632         }
11633 }
11634
/* PCI probe callback: bring up one Tigon3 NIC.
 *
 * Enables the PCI device, claims its resources, maps the BAR-0 register
 * window, allocates and populates the net_device/tg3 private state,
 * configures DMA masks, resets and DMA-tests the chip, and finally
 * registers the netdev.  Returns 0 on success or a negative errno,
 * unwinding partially-acquired resources via the err_out_* labels.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner once, on the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability; it is required because the
	 * driver manipulates the device power state at runtime.
	 */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	/* Advertise hardware VLAN tag insert/strip when VLANs are built in. */
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	/* The tg3_debug module parameter overrides the default msglevel. */
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	/* Default ring sizes; may be reduced below for some chips. */
	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the netdev entry points. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip revision, feature flags, etc. from the hardware. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability: hardware TSO chips always get it;
	 * firmware TSO is disabled on chips/configurations where it is
	 * known not to work (5700/5701/5705_A0/5906, or when ASF is on).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* 5705_A1 without TSO on a slow bus needs a smaller rx ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips: locate the sibling function's pci_dev. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Announce the device: part number, revision, PHY, bus and MAC. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	/* Link state is unknown until the first poll; start carrier-off. */
	netif_carrier_off(tp->dev);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11963
11964 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11965 {
11966         struct net_device *dev = pci_get_drvdata(pdev);
11967
11968         if (dev) {
11969                 struct tg3 *tp = netdev_priv(dev);
11970
11971                 flush_scheduled_work();
11972                 unregister_netdev(dev);
11973                 if (tp->regs) {
11974                         iounmap(tp->regs);
11975                         tp->regs = NULL;
11976                 }
11977                 free_netdev(dev);
11978                 pci_release_regions(pdev);
11979                 pci_disable_device(pdev);
11980                 pci_set_drvdata(pdev, NULL);
11981         }
11982 }
11983
/* PCI suspend callback: quiesce the interface, halt the chip and move
 * it into the low-power state chosen for @state.  Returns 0 on success.
 * If the power transition fails, the hardware is restarted and the
 * interface re-attached so the device remains usable, and the error is
 * still reported to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to do if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Finish any pending reset_task before touching the hardware. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* Mark hw as de-initialized; set again on successful resume. */
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Could not enter the low-power state: bring the chip
		 * back up so the interface keeps working.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12029
/* PCI resume callback: restore PCI config space, power the chip back
 * to D0, re-attach the interface and restart the hardware and the
 * periodic timer.  Returns 0 on success or a negative error from the
 * power-state change or hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* If the interface was down at suspend there is nothing to redo. */
	if (!netif_running(dev))
		return 0;

	/* Restore the config-space snapshot taken before suspend/halt. */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Mark hw initialized again and bring it back up. */
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12064
/* PCI driver glue: device-ID table plus the probe/remove and
 * suspend/resume entry points defined above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12073
/* Module load: register the PCI driver; individual devices are then
 * probed through tg3_init_one().
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
12078
/* Module unload: unregister the PCI driver, which removes all bound
 * devices via tg3_remove_one().
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12083
/* Hook driver registration/unregistration to module load/unload. */
module_init(tg3_init);
module_exit(tg3_cleanup);