2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2007 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
44 #include <net/checksum.h>
47 #include <asm/system.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
53 #include <asm/idprom.h>
57 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
58 #define TG3_VLAN_TAG_USED 1
60 #define TG3_VLAN_TAG_USED 0
63 #define TG3_TSO_SUPPORT 1
67 #define DRV_MODULE_NAME "tg3"
68 #define PFX DRV_MODULE_NAME ": "
69 #define DRV_MODULE_VERSION "3.94"
70 #define DRV_MODULE_RELDATE "August 14, 2008"
72 #define TG3_DEF_MAC_MODE 0
73 #define TG3_DEF_RX_MODE 0
74 #define TG3_DEF_TX_MODE 0
75 #define TG3_DEF_MSG_ENABLE \
85 /* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem
88 #define TG3_TX_TIMEOUT (5 * HZ)
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU 60
92 #define TG3_MAX_MTU(tp) \
93 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory.
99 #define TG3_RX_RING_SIZE 512
100 #define TG3_DEF_RX_RING_PENDING 200
101 #define TG3_RX_JUMBO_RING_SIZE 256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
104 /* Do not place this n-ring entries value into the tp struct itself,
105 * we really want to expose these constants to GCC so that modulo et
106 * al. operations are done with shifts and masks instead of with
107 * hw multiply/modulo instructions. Another solution would be to
108 * replace things like '% foo' with '& (foo - 1)'.
110 #define TG3_RX_RCB_RING_SIZE(tp) \
111 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
113 #define TG3_TX_RING_SIZE 512
114 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
116 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
124 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
127 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
129 /* minimum number of free TX descriptors required to wake up TX process */
130 #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
132 /* number of ETHTOOL_GSTATS u64's */
133 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135 #define TG3_NUM_TEST 6
137 static char version[] __devinitdata =
138 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
140 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
141 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
142 MODULE_LICENSE("GPL");
143 MODULE_VERSION(DRV_MODULE_VERSION);
145 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
146 module_param(tg3_debug, int, 0);
147 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
149 static struct pci_device_id tg3_pci_tbl[] = {
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
211 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
212 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
213 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
214 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
215 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
216 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
217 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
221 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
223 static const struct {
224 const char string[ETH_GSTRING_LEN];
225 } ethtool_stats_keys[TG3_NUM_STATS] = {
228 { "rx_ucast_packets" },
229 { "rx_mcast_packets" },
230 { "rx_bcast_packets" },
232 { "rx_align_errors" },
233 { "rx_xon_pause_rcvd" },
234 { "rx_xoff_pause_rcvd" },
235 { "rx_mac_ctrl_rcvd" },
236 { "rx_xoff_entered" },
237 { "rx_frame_too_long_errors" },
239 { "rx_undersize_packets" },
240 { "rx_in_length_errors" },
241 { "rx_out_length_errors" },
242 { "rx_64_or_less_octet_packets" },
243 { "rx_65_to_127_octet_packets" },
244 { "rx_128_to_255_octet_packets" },
245 { "rx_256_to_511_octet_packets" },
246 { "rx_512_to_1023_octet_packets" },
247 { "rx_1024_to_1522_octet_packets" },
248 { "rx_1523_to_2047_octet_packets" },
249 { "rx_2048_to_4095_octet_packets" },
250 { "rx_4096_to_8191_octet_packets" },
251 { "rx_8192_to_9022_octet_packets" },
258 { "tx_flow_control" },
260 { "tx_single_collisions" },
261 { "tx_mult_collisions" },
263 { "tx_excessive_collisions" },
264 { "tx_late_collisions" },
265 { "tx_collide_2times" },
266 { "tx_collide_3times" },
267 { "tx_collide_4times" },
268 { "tx_collide_5times" },
269 { "tx_collide_6times" },
270 { "tx_collide_7times" },
271 { "tx_collide_8times" },
272 { "tx_collide_9times" },
273 { "tx_collide_10times" },
274 { "tx_collide_11times" },
275 { "tx_collide_12times" },
276 { "tx_collide_13times" },
277 { "tx_collide_14times" },
278 { "tx_collide_15times" },
279 { "tx_ucast_packets" },
280 { "tx_mcast_packets" },
281 { "tx_bcast_packets" },
282 { "tx_carrier_sense_errors" },
286 { "dma_writeq_full" },
287 { "dma_write_prioq_full" },
291 { "rx_threshold_hit" },
293 { "dma_readq_full" },
294 { "dma_read_prioq_full" },
295 { "tx_comp_queue_full" },
297 { "ring_set_send_prod_index" },
298 { "ring_status_update" },
300 { "nic_avoided_irqs" },
301 { "nic_tx_threshold_hit" }
304 static const struct {
305 const char string[ETH_GSTRING_LEN];
306 } ethtool_test_keys[TG3_NUM_TEST] = {
307 { "nvram test (online) " },
308 { "link test (online) " },
309 { "register test (offline)" },
310 { "memory test (offline)" },
311 { "loopback test (offline)" },
312 { "interrupt test (offline)" },
315 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
317 writel(val, tp->regs + off);
320 static u32 tg3_read32(struct tg3 *tp, u32 off)
322 return (readl(tp->regs + off));
325 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
327 writel(val, tp->aperegs + off);
330 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
332 return (readl(tp->aperegs + off));
335 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
339 spin_lock_irqsave(&tp->indirect_lock, flags);
340 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
341 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
342 spin_unlock_irqrestore(&tp->indirect_lock, flags);
345 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
347 writel(val, tp->regs + off);
348 readl(tp->regs + off);
351 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
356 spin_lock_irqsave(&tp->indirect_lock, flags);
357 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
358 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
359 spin_unlock_irqrestore(&tp->indirect_lock, flags);
363 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
367 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
368 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
369 TG3_64BIT_REG_LOW, val);
372 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
373 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
374 TG3_64BIT_REG_LOW, val);
378 spin_lock_irqsave(&tp->indirect_lock, flags);
379 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
380 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
381 spin_unlock_irqrestore(&tp->indirect_lock, flags);
383 /* In indirect mode when disabling interrupts, we also need
384 * to clear the interrupt bit in the GRC local ctrl register.
386 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
388 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
389 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
393 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
398 spin_lock_irqsave(&tp->indirect_lock, flags);
399 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
400 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
401 spin_unlock_irqrestore(&tp->indirect_lock, flags);
405 /* usec_wait specifies the wait time in usec when writing to certain registers
406 * where it is unsafe to read back the register without some delay.
407 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
408 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
410 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
412 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
413 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
414 /* Non-posted methods */
415 tp->write32(tp, off, val);
418 tg3_write32(tp, off, val);
423 /* Wait again after the read for the posted method to guarantee that
424 * the wait time is met.
430 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
432 tp->write32_mbox(tp, off, val);
433 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
434 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
435 tp->read32_mbox(tp, off);
438 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
440 void __iomem *mbox = tp->regs + off;
442 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
444 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
448 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
450 return (readl(tp->regs + off + GRCMBOX_BASE));
453 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
455 writel(val, tp->regs + off + GRCMBOX_BASE);
458 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
459 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
460 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
461 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
462 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
464 #define tw32(reg,val) tp->write32(tp, reg, val)
465 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
466 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
467 #define tr32(reg) tp->read32(tp, reg)
469 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
473 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
474 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
477 spin_lock_irqsave(&tp->indirect_lock, flags);
478 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
479 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
480 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
482 /* Always leave this as zero. */
483 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
485 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
486 tw32_f(TG3PCI_MEM_WIN_DATA, val);
488 /* Always leave this as zero. */
489 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
491 spin_unlock_irqrestore(&tp->indirect_lock, flags);
494 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
498 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
499 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
506 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
507 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
509 /* Always leave this as zero. */
510 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
513 *val = tr32(TG3PCI_MEM_WIN_DATA);
515 /* Always leave this as zero. */
516 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
518 spin_unlock_irqrestore(&tp->indirect_lock, flags);
521 static void tg3_ape_lock_init(struct tg3 *tp)
525 /* Make sure the driver hasn't any stale locks. */
526 for (i = 0; i < 8; i++)
527 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
528 APE_LOCK_GRANT_DRIVER);
531 static int tg3_ape_lock(struct tg3 *tp, int locknum)
537 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541 case TG3_APE_LOCK_GRC:
542 case TG3_APE_LOCK_MEM:
550 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
552 /* Wait for up to 1 millisecond to acquire lock. */
553 for (i = 0; i < 100; i++) {
554 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
555 if (status == APE_LOCK_GRANT_DRIVER)
560 if (status != APE_LOCK_GRANT_DRIVER) {
561 /* Revoke the lock request. */
562 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
563 APE_LOCK_GRANT_DRIVER);
571 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579 case TG3_APE_LOCK_GRC:
580 case TG3_APE_LOCK_MEM:
587 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
590 static void tg3_disable_ints(struct tg3 *tp)
592 tw32(TG3PCI_MISC_HOST_CTRL,
593 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
594 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
597 static inline void tg3_cond_int(struct tg3 *tp)
599 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
600 (tp->hw_status->status & SD_STATUS_UPDATED))
601 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
603 tw32(HOSTCC_MODE, tp->coalesce_mode |
604 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
607 static void tg3_enable_ints(struct tg3 *tp)
612 tw32(TG3PCI_MISC_HOST_CTRL,
613 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
614 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
615 (tp->last_tag << 24));
616 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
617 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
618 (tp->last_tag << 24));
622 static inline unsigned int tg3_has_work(struct tg3 *tp)
624 struct tg3_hw_status *sblk = tp->hw_status;
625 unsigned int work_exists = 0;
627 /* check for phy events */
628 if (!(tp->tg3_flags &
629 (TG3_FLAG_USE_LINKCHG_REG |
630 TG3_FLAG_POLL_SERDES))) {
631 if (sblk->status & SD_STATUS_LINK_CHG)
634 /* check for RX/TX work to do */
635 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
636 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
643 * similar to tg3_enable_ints, but it accurately determines whether there
644 * is new work pending and can return without flushing the PIO write
645 * which reenables interrupts
647 static void tg3_restart_ints(struct tg3 *tp)
649 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
653 /* When doing tagged status, this work check is unnecessary.
654 * The last_tag we write above tells the chip which piece of
655 * work we've completed.
657 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
659 tw32(HOSTCC_MODE, tp->coalesce_mode |
660 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
663 static inline void tg3_netif_stop(struct tg3 *tp)
665 tp->dev->trans_start = jiffies; /* prevent tx timeout */
666 napi_disable(&tp->napi);
667 netif_tx_disable(tp->dev);
670 static inline void tg3_netif_start(struct tg3 *tp)
672 netif_wake_queue(tp->dev);
673 /* NOTE: unconditional netif_wake_queue is only appropriate
674 * so long as all callers are assured to have free tx slots
675 * (such as after tg3_init_hw)
677 napi_enable(&tp->napi);
678 tp->hw_status->status |= SD_STATUS_UPDATED;
682 static void tg3_switch_clocks(struct tg3 *tp)
684 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
687 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
688 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
691 orig_clock_ctrl = clock_ctrl;
692 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
693 CLOCK_CTRL_CLKRUN_OENABLE |
695 tp->pci_clock_ctrl = clock_ctrl;
697 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
698 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
699 tw32_wait_f(TG3PCI_CLOCK_CTRL,
700 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
702 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
703 tw32_wait_f(TG3PCI_CLOCK_CTRL,
705 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
707 tw32_wait_f(TG3PCI_CLOCK_CTRL,
708 clock_ctrl | (CLOCK_CTRL_ALTCLK),
711 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
714 #define PHY_BUSY_LOOPS 5000
716 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
722 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
724 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
730 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
731 MI_COM_PHY_ADDR_MASK);
732 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
733 MI_COM_REG_ADDR_MASK);
734 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
736 tw32_f(MAC_MI_COM, frame_val);
738 loops = PHY_BUSY_LOOPS;
741 frame_val = tr32(MAC_MI_COM);
743 if ((frame_val & MI_COM_BUSY) == 0) {
745 frame_val = tr32(MAC_MI_COM);
753 *val = frame_val & MI_COM_DATA_MASK;
757 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
758 tw32_f(MAC_MI_MODE, tp->mi_mode);
765 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
772 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
775 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
777 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
781 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
782 MI_COM_PHY_ADDR_MASK);
783 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
784 MI_COM_REG_ADDR_MASK);
785 frame_val |= (val & MI_COM_DATA_MASK);
786 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
788 tw32_f(MAC_MI_COM, frame_val);
790 loops = PHY_BUSY_LOOPS;
793 frame_val = tr32(MAC_MI_COM);
794 if ((frame_val & MI_COM_BUSY) == 0) {
796 frame_val = tr32(MAC_MI_COM);
806 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
807 tw32_f(MAC_MI_MODE, tp->mi_mode);
814 static int tg3_bmcr_reset(struct tg3 *tp)
819 /* OK, reset it, and poll the BMCR_RESET bit until it
820 * clears or we time out.
822 phy_control = BMCR_RESET;
823 err = tg3_writephy(tp, MII_BMCR, phy_control);
829 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833 if ((phy_control & BMCR_RESET) == 0) {
845 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
847 struct tg3 *tp = (struct tg3 *)bp->priv;
850 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
853 if (tg3_readphy(tp, reg, &val))
859 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
861 struct tg3 *tp = (struct tg3 *)bp->priv;
863 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
866 if (tg3_writephy(tp, reg, val))
872 static int tg3_mdio_reset(struct mii_bus *bp)
877 static void tg3_mdio_config(struct tg3 *tp)
881 if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
882 PHY_INTERFACE_MODE_RGMII)
885 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
886 MAC_PHYCFG1_RGMII_SND_STAT_EN);
887 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
888 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
889 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
890 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
891 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
893 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
895 val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
896 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
897 val |= MAC_PHYCFG2_INBAND_ENABLE;
898 tw32(MAC_PHYCFG2, val);
900 val = tr32(MAC_EXT_RGMII_MODE);
901 val &= ~(MAC_RGMII_MODE_RX_INT_B |
902 MAC_RGMII_MODE_RX_QUALITY |
903 MAC_RGMII_MODE_RX_ACTIVITY |
904 MAC_RGMII_MODE_RX_ENG_DET |
905 MAC_RGMII_MODE_TX_ENABLE |
906 MAC_RGMII_MODE_TX_LOWPWR |
907 MAC_RGMII_MODE_TX_RESET);
908 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
909 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
910 val |= MAC_RGMII_MODE_RX_INT_B |
911 MAC_RGMII_MODE_RX_QUALITY |
912 MAC_RGMII_MODE_RX_ACTIVITY |
913 MAC_RGMII_MODE_RX_ENG_DET;
914 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
915 val |= MAC_RGMII_MODE_TX_ENABLE |
916 MAC_RGMII_MODE_TX_LOWPWR |
917 MAC_RGMII_MODE_TX_RESET;
919 tw32(MAC_EXT_RGMII_MODE, val);
922 static void tg3_mdio_start(struct tg3 *tp)
924 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
925 mutex_lock(&tp->mdio_bus->mdio_lock);
926 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
927 mutex_unlock(&tp->mdio_bus->mdio_lock);
930 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
931 tw32_f(MAC_MI_MODE, tp->mi_mode);
934 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
938 static void tg3_mdio_stop(struct tg3 *tp)
940 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
941 mutex_lock(&tp->mdio_bus->mdio_lock);
942 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
943 mutex_unlock(&tp->mdio_bus->mdio_lock);
947 static int tg3_mdio_init(struct tg3 *tp)
951 struct phy_device *phydev;
955 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
956 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
959 tp->mdio_bus = mdiobus_alloc();
960 if (tp->mdio_bus == NULL)
963 tp->mdio_bus->name = "tg3 mdio bus";
964 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
965 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
966 tp->mdio_bus->priv = tp;
967 tp->mdio_bus->parent = &tp->pdev->dev;
968 tp->mdio_bus->read = &tg3_mdio_read;
969 tp->mdio_bus->write = &tg3_mdio_write;
970 tp->mdio_bus->reset = &tg3_mdio_reset;
971 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
972 tp->mdio_bus->irq = &tp->mdio_irq[0];
974 for (i = 0; i < PHY_MAX_ADDR; i++)
975 tp->mdio_bus->irq[i] = PHY_POLL;
977 /* The bus registration will look for all the PHYs on the mdio bus.
978 * Unfortunately, it does not ensure the PHY is powered up before
979 * accessing the PHY ID registers. A chip reset is the
980 * quickest way to bring the device back to an operational state..
982 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
985 i = mdiobus_register(tp->mdio_bus);
987 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
992 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
994 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
996 switch (phydev->phy_id) {
997 case TG3_PHY_ID_BCM50610:
998 phydev->interface = PHY_INTERFACE_MODE_RGMII;
999 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1000 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1001 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1002 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1003 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1004 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1006 case TG3_PHY_ID_BCMAC131:
1007 phydev->interface = PHY_INTERFACE_MODE_MII;
1011 tg3_mdio_config(tp);
1016 static void tg3_mdio_fini(struct tg3 *tp)
1018 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1019 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1020 mdiobus_unregister(tp->mdio_bus);
1021 mdiobus_free(tp->mdio_bus);
1022 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1026 /* tp->lock is held. */
1027 static inline void tg3_generate_fw_event(struct tg3 *tp)
1031 val = tr32(GRC_RX_CPU_EVENT);
1032 val |= GRC_RX_CPU_DRIVER_EVENT;
1033 tw32_f(GRC_RX_CPU_EVENT, val);
1035 tp->last_event_jiffies = jiffies;
1038 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1040 /* tp->lock is held. */
1041 static void tg3_wait_for_event_ack(struct tg3 *tp)
1044 unsigned int delay_cnt;
1047 /* If enough time has passed, no wait is necessary. */
1048 time_remain = (long)(tp->last_event_jiffies + 1 +
1049 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1051 if (time_remain < 0)
1054 /* Check if we can shorten the wait time. */
1055 delay_cnt = jiffies_to_usecs(time_remain);
1056 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1057 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1058 delay_cnt = (delay_cnt >> 3) + 1;
1060 for (i = 0; i < delay_cnt; i++) {
1061 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1067 /* tp->lock is held. */
1068 static void tg3_ump_link_report(struct tg3 *tp)
1073 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1074 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1077 tg3_wait_for_event_ack(tp);
1079 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1081 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1084 if (!tg3_readphy(tp, MII_BMCR, ®))
1086 if (!tg3_readphy(tp, MII_BMSR, ®))
1087 val |= (reg & 0xffff);
1088 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1091 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1093 if (!tg3_readphy(tp, MII_LPA, ®))
1094 val |= (reg & 0xffff);
1095 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1098 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1099 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1101 if (!tg3_readphy(tp, MII_STAT1000, ®))
1102 val |= (reg & 0xffff);
1104 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1106 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1110 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1112 tg3_generate_fw_event(tp);
1115 static void tg3_link_report(struct tg3 *tp)
1117 if (!netif_carrier_ok(tp->dev)) {
1118 if (netif_msg_link(tp))
1119 printk(KERN_INFO PFX "%s: Link is down.\n",
1121 tg3_ump_link_report(tp);
1122 } else if (netif_msg_link(tp)) {
1123 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1125 (tp->link_config.active_speed == SPEED_1000 ?
1127 (tp->link_config.active_speed == SPEED_100 ?
1129 (tp->link_config.active_duplex == DUPLEX_FULL ?
1132 printk(KERN_INFO PFX
1133 "%s: Flow control is %s for TX and %s for RX.\n",
1135 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1137 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1139 tg3_ump_link_report(tp);
1143 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1147 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1148 miireg = ADVERTISE_PAUSE_CAP;
1149 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1150 miireg = ADVERTISE_PAUSE_ASYM;
1151 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1152 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1159 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1163 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1164 miireg = ADVERTISE_1000XPAUSE;
1165 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1166 miireg = ADVERTISE_1000XPSE_ASYM;
1167 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1168 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1175 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1179 if (lcladv & ADVERTISE_PAUSE_CAP) {
1180 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1181 if (rmtadv & LPA_PAUSE_CAP)
1182 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1183 else if (rmtadv & LPA_PAUSE_ASYM)
1184 cap = TG3_FLOW_CTRL_RX;
1186 if (rmtadv & LPA_PAUSE_CAP)
1187 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1189 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1190 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1191 cap = TG3_FLOW_CTRL_TX;
1197 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1201 if (lcladv & ADVERTISE_1000XPAUSE) {
1202 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1203 if (rmtadv & LPA_1000XPAUSE)
1204 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1205 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1206 cap = TG3_FLOW_CTRL_RX;
1208 if (rmtadv & LPA_1000XPAUSE)
1209 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1211 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1212 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1213 cap = TG3_FLOW_CTRL_TX;
1219 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1223 u32 old_rx_mode = tp->rx_mode;
1224 u32 old_tx_mode = tp->tx_mode;
1226 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1227 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1229 autoneg = tp->link_config.autoneg;
1231 if (autoneg == AUTONEG_ENABLE &&
1232 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1233 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1234 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1236 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1238 flowctrl = tp->link_config.flowctrl;
1240 tp->link_config.active_flowctrl = flowctrl;
1242 if (flowctrl & TG3_FLOW_CTRL_RX)
1243 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1245 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1247 if (old_rx_mode != tp->rx_mode)
1248 tw32_f(MAC_RX_MODE, tp->rx_mode);
1250 if (flowctrl & TG3_FLOW_CTRL_TX)
1251 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1253 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1255 if (old_tx_mode != tp->tx_mode)
1256 tw32_f(MAC_TX_MODE, tp->tx_mode);
1259 static void tg3_adjust_link(struct net_device *dev)
1261 u8 oldflowctrl, linkmesg = 0;
1262 u32 mac_mode, lcl_adv, rmt_adv;
1263 struct tg3 *tp = netdev_priv(dev);
1264 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1266 spin_lock(&tp->lock);
1268 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1269 MAC_MODE_HALF_DUPLEX);
1271 oldflowctrl = tp->link_config.active_flowctrl;
1277 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1278 mac_mode |= MAC_MODE_PORT_MODE_MII;
1280 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1282 if (phydev->duplex == DUPLEX_HALF)
1283 mac_mode |= MAC_MODE_HALF_DUPLEX;
1285 lcl_adv = tg3_advert_flowctrl_1000T(
1286 tp->link_config.flowctrl);
1289 rmt_adv = LPA_PAUSE_CAP;
1290 if (phydev->asym_pause)
1291 rmt_adv |= LPA_PAUSE_ASYM;
1294 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1296 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1298 if (mac_mode != tp->mac_mode) {
1299 tp->mac_mode = mac_mode;
1300 tw32_f(MAC_MODE, tp->mac_mode);
1304 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1305 tw32(MAC_TX_LENGTHS,
1306 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1307 (6 << TX_LENGTHS_IPG_SHIFT) |
1308 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1310 tw32(MAC_TX_LENGTHS,
1311 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1312 (6 << TX_LENGTHS_IPG_SHIFT) |
1313 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1315 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1316 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1317 phydev->speed != tp->link_config.active_speed ||
1318 phydev->duplex != tp->link_config.active_duplex ||
1319 oldflowctrl != tp->link_config.active_flowctrl)
1322 tp->link_config.active_speed = phydev->speed;
1323 tp->link_config.active_duplex = phydev->duplex;
1325 spin_unlock(&tp->lock);
1328 tg3_link_report(tp);
1331 static int tg3_phy_init(struct tg3 *tp)
1333 struct phy_device *phydev;
1335 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1338 /* Bring the PHY back to a known state. */
1341 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1343 /* Attach the MAC to the PHY. */
1344 phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1345 phydev->dev_flags, phydev->interface);
1346 if (IS_ERR(phydev)) {
1347 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1348 return PTR_ERR(phydev);
1351 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1353 /* Mask with MAC supported features. */
1354 phydev->supported &= (PHY_GBIT_FEATURES |
1356 SUPPORTED_Asym_Pause);
1358 phydev->advertising = phydev->supported;
1361 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
1362 tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
1367 static void tg3_phy_start(struct tg3 *tp)
1369 struct phy_device *phydev;
1371 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1374 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1376 if (tp->link_config.phy_is_low_power) {
1377 tp->link_config.phy_is_low_power = 0;
1378 phydev->speed = tp->link_config.orig_speed;
1379 phydev->duplex = tp->link_config.orig_duplex;
1380 phydev->autoneg = tp->link_config.orig_autoneg;
1381 phydev->advertising = tp->link_config.orig_advertising;
1386 phy_start_aneg(phydev);
1389 static void tg3_phy_stop(struct tg3 *tp)
1391 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1394 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1397 static void tg3_phy_fini(struct tg3 *tp)
1399 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1400 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1401 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1405 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1407 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1408 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1411 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1415 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1416 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1422 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1423 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1424 ephy | MII_TG3_EPHY_SHADOW_EN);
1425 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1427 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1429 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1430 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1432 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1435 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1436 MII_TG3_AUXCTL_SHDWSEL_MISC;
1437 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1438 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1440 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1442 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1443 phy |= MII_TG3_AUXCTL_MISC_WREN;
1444 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1449 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1453 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1456 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1457 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1458 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1459 (val | (1 << 15) | (1 << 4)));
1462 static void tg3_phy_apply_otp(struct tg3 *tp)
1471 /* Enable SM_DSP clock and tx 6dB coding. */
1472 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1473 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1474 MII_TG3_AUXCTL_ACTL_TX_6DB;
1475 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1477 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1478 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1479 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1481 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1482 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1483 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1485 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1486 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1487 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1489 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1490 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1492 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1493 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1495 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1496 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1497 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1499 /* Turn off SM_DSP clock. */
1500 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1501 MII_TG3_AUXCTL_ACTL_TX_6DB;
1502 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1505 static int tg3_wait_macro_done(struct tg3 *tp)
1512 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1513 if ((tmp32 & 0x1000) == 0)
1523 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1525 static const u32 test_pat[4][6] = {
1526 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1527 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1528 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1529 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1533 for (chan = 0; chan < 4; chan++) {
1536 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1537 (chan * 0x2000) | 0x0200);
1538 tg3_writephy(tp, 0x16, 0x0002);
1540 for (i = 0; i < 6; i++)
1541 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1544 tg3_writephy(tp, 0x16, 0x0202);
1545 if (tg3_wait_macro_done(tp)) {
1550 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1551 (chan * 0x2000) | 0x0200);
1552 tg3_writephy(tp, 0x16, 0x0082);
1553 if (tg3_wait_macro_done(tp)) {
1558 tg3_writephy(tp, 0x16, 0x0802);
1559 if (tg3_wait_macro_done(tp)) {
1564 for (i = 0; i < 6; i += 2) {
1567 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1568 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1569 tg3_wait_macro_done(tp)) {
1575 if (low != test_pat[chan][i] ||
1576 high != test_pat[chan][i+1]) {
1577 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1578 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1579 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1589 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1593 for (chan = 0; chan < 4; chan++) {
1596 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1597 (chan * 0x2000) | 0x0200);
1598 tg3_writephy(tp, 0x16, 0x0002);
1599 for (i = 0; i < 6; i++)
1600 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1601 tg3_writephy(tp, 0x16, 0x0202);
1602 if (tg3_wait_macro_done(tp))
1609 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1611 u32 reg32, phy9_orig;
1612 int retries, do_phy_reset, err;
1618 err = tg3_bmcr_reset(tp);
1624 /* Disable transmitter and interrupt. */
1625 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
1629 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1631 /* Set full-duplex, 1000 mbps. */
1632 tg3_writephy(tp, MII_BMCR,
1633 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1635 /* Set to master mode. */
1636 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1639 tg3_writephy(tp, MII_TG3_CTRL,
1640 (MII_TG3_CTRL_AS_MASTER |
1641 MII_TG3_CTRL_ENABLE_AS_MASTER));
1643 /* Enable SM_DSP_CLOCK and 6dB. */
1644 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1646 /* Block the PHY control access. */
1647 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1648 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1650 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1653 } while (--retries);
1655 err = tg3_phy_reset_chanpat(tp);
1659 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1660 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1662 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1663 tg3_writephy(tp, 0x16, 0x0000);
1665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1666 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1667 /* Set Extended packet length bit for jumbo frames */
1668 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1671 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1674 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1676 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
1678 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1685 /* This will reset the tigon3 PHY if there is no valid
1686 * link unless the FORCE argument is non-zero.
1688 static int tg3_phy_reset(struct tg3 *tp)
1694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1697 val = tr32(GRC_MISC_CFG);
1698 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1701 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1702 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1706 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1707 netif_carrier_off(tp->dev);
1708 tg3_link_report(tp);
1711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1713 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1714 err = tg3_phy_reset_5703_4_5(tp);
1721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1722 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1723 cpmuctrl = tr32(TG3_CPMU_CTRL);
1724 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1726 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1729 err = tg3_bmcr_reset(tp);
1733 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1736 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1737 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1739 tw32(TG3_CPMU_CTRL, cpmuctrl);
1742 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1745 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1746 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1747 CPMU_LSPD_1000MB_MACCLK_12_5) {
1748 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1750 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1753 /* Disable GPHY autopowerdown. */
1754 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1755 MII_TG3_MISC_SHDW_WREN |
1756 MII_TG3_MISC_SHDW_APD_SEL |
1757 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1760 tg3_phy_apply_otp(tp);
1763 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1764 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1765 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1766 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1767 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1768 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1769 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1771 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1772 tg3_writephy(tp, 0x1c, 0x8d68);
1773 tg3_writephy(tp, 0x1c, 0x8d68);
1775 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1776 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1777 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1778 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1779 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1780 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1781 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1782 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1783 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1785 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1786 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1787 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1788 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1789 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1790 tg3_writephy(tp, MII_TG3_TEST1,
1791 MII_TG3_TEST1_TRIM_EN | 0x4);
1793 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1794 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1796 /* Set Extended packet length bit (bit 14) on all chips that */
1797 /* support jumbo frames */
1798 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1799 /* Cannot do read-modify-write on 5401 */
1800 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1801 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1804 /* Set bit 14 with read-modify-write to preserve other bits */
1805 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1806 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1807 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1810 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1811 * jumbo frames transmission.
1813 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1816 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1822 /* adjust output voltage */
1823 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1826 tg3_phy_toggle_automdix(tp, 1);
1827 tg3_phy_set_wirespeed(tp);
1831 static void tg3_frob_aux_power(struct tg3 *tp)
1833 struct tg3 *tp_peer = tp;
1835 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1838 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1839 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1840 struct net_device *dev_peer;
1842 dev_peer = pci_get_drvdata(tp->pdev_peer);
1843 /* remove_one() may have been run on the peer. */
1847 tp_peer = netdev_priv(dev_peer);
1850 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1851 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1852 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1853 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1856 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1857 (GRC_LCLCTRL_GPIO_OE0 |
1858 GRC_LCLCTRL_GPIO_OE1 |
1859 GRC_LCLCTRL_GPIO_OE2 |
1860 GRC_LCLCTRL_GPIO_OUTPUT0 |
1861 GRC_LCLCTRL_GPIO_OUTPUT1),
1863 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1864 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1865 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1866 GRC_LCLCTRL_GPIO_OE1 |
1867 GRC_LCLCTRL_GPIO_OE2 |
1868 GRC_LCLCTRL_GPIO_OUTPUT0 |
1869 GRC_LCLCTRL_GPIO_OUTPUT1 |
1871 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1873 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1874 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1876 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1877 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1880 u32 grc_local_ctrl = 0;
1882 if (tp_peer != tp &&
1883 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1886 /* Workaround to prevent overdrawing Amps. */
1887 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1889 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1890 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1891 grc_local_ctrl, 100);
1894 /* On 5753 and variants, GPIO2 cannot be used. */
1895 no_gpio2 = tp->nic_sram_data_cfg &
1896 NIC_SRAM_DATA_CFG_NO_GPIO2;
1898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1899 GRC_LCLCTRL_GPIO_OE1 |
1900 GRC_LCLCTRL_GPIO_OE2 |
1901 GRC_LCLCTRL_GPIO_OUTPUT1 |
1902 GRC_LCLCTRL_GPIO_OUTPUT2;
1904 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1905 GRC_LCLCTRL_GPIO_OUTPUT2);
1907 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1908 grc_local_ctrl, 100);
1910 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1912 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1913 grc_local_ctrl, 100);
1916 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1917 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1918 grc_local_ctrl, 100);
1922 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1923 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1924 if (tp_peer != tp &&
1925 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1928 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1929 (GRC_LCLCTRL_GPIO_OE1 |
1930 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1932 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1933 GRC_LCLCTRL_GPIO_OE1, 100);
1935 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1936 (GRC_LCLCTRL_GPIO_OE1 |
1937 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1942 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1944 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1946 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1947 if (speed != SPEED_10)
1949 } else if (speed == SPEED_10)
1955 static int tg3_setup_phy(struct tg3 *, int);
1957 #define RESET_KIND_SHUTDOWN 0
1958 #define RESET_KIND_INIT 1
1959 #define RESET_KIND_SUSPEND 2
1961 static void tg3_write_sig_post_reset(struct tg3 *, int);
1962 static int tg3_halt_cpu(struct tg3 *, u32);
1963 static int tg3_nvram_lock(struct tg3 *);
1964 static void tg3_nvram_unlock(struct tg3 *);
1966 static void tg3_power_down_phy(struct tg3 *tp)
1970 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1972 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1973 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1976 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1977 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1978 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1985 val = tr32(GRC_MISC_CFG);
1986 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1989 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1990 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1991 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1992 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1995 /* The PHY should not be powered down on some chips because
1998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2000 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2001 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2004 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
2005 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2006 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2007 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2008 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2011 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2014 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2018 /* Make sure register accesses (indirect or otherwise)
2019 * will function correctly.
2021 pci_write_config_dword(tp->pdev,
2022 TG3PCI_MISC_HOST_CTRL,
2023 tp->misc_host_ctrl);
2027 pci_enable_wake(tp->pdev, state, false);
2028 pci_set_power_state(tp->pdev, PCI_D0);
2030 /* Switch out of Vaux if it is a NIC */
2031 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2032 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2042 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2043 tp->dev->name, state);
2046 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2047 tw32(TG3PCI_MISC_HOST_CTRL,
2048 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2050 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2051 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2052 !tp->link_config.phy_is_low_power) {
2053 struct phy_device *phydev;
2056 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2058 tp->link_config.phy_is_low_power = 1;
2060 tp->link_config.orig_speed = phydev->speed;
2061 tp->link_config.orig_duplex = phydev->duplex;
2062 tp->link_config.orig_autoneg = phydev->autoneg;
2063 tp->link_config.orig_advertising = phydev->advertising;
2065 advertising = ADVERTISED_TP |
2067 ADVERTISED_Autoneg |
2068 ADVERTISED_10baseT_Half;
2070 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2071 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2072 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2074 ADVERTISED_100baseT_Half |
2075 ADVERTISED_100baseT_Full |
2076 ADVERTISED_10baseT_Full;
2078 advertising |= ADVERTISED_10baseT_Full;
2081 phydev->advertising = advertising;
2083 phy_start_aneg(phydev);
2086 if (tp->link_config.phy_is_low_power == 0) {
2087 tp->link_config.phy_is_low_power = 1;
2088 tp->link_config.orig_speed = tp->link_config.speed;
2089 tp->link_config.orig_duplex = tp->link_config.duplex;
2090 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2093 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2094 tp->link_config.speed = SPEED_10;
2095 tp->link_config.duplex = DUPLEX_HALF;
2096 tp->link_config.autoneg = AUTONEG_ENABLE;
2097 tg3_setup_phy(tp, 0);
2101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2104 val = tr32(GRC_VCPU_EXT_CTRL);
2105 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2106 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2110 for (i = 0; i < 200; i++) {
2111 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2112 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2117 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2118 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2119 WOL_DRV_STATE_SHUTDOWN |
2123 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2126 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2127 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2128 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2132 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2133 mac_mode = MAC_MODE_PORT_MODE_GMII;
2135 mac_mode = MAC_MODE_PORT_MODE_MII;
2137 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2138 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2140 u32 speed = (tp->tg3_flags &
2141 TG3_FLAG_WOL_SPEED_100MB) ?
2142 SPEED_100 : SPEED_10;
2143 if (tg3_5700_link_polarity(tp, speed))
2144 mac_mode |= MAC_MODE_LINK_POLARITY;
2146 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2149 mac_mode = MAC_MODE_PORT_MODE_TBI;
2152 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2153 tw32(MAC_LED_CTRL, tp->led_ctrl);
2155 if (pci_pme_capable(tp->pdev, state) &&
2156 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
2157 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2159 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2160 mac_mode |= tp->mac_mode &
2161 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2162 if (mac_mode & MAC_MODE_APE_TX_EN)
2163 mac_mode |= MAC_MODE_TDE_ENABLE;
2166 tw32_f(MAC_MODE, mac_mode);
2169 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2173 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2174 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2178 base_val = tp->pci_clock_ctrl;
2179 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2180 CLOCK_CTRL_TXCLK_DISABLE);
2182 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2183 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2184 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2185 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2186 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2188 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2189 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2190 u32 newbits1, newbits2;
2192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2194 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2195 CLOCK_CTRL_TXCLK_DISABLE |
2197 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2198 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2199 newbits1 = CLOCK_CTRL_625_CORE;
2200 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2202 newbits1 = CLOCK_CTRL_ALTCLK;
2203 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2206 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2209 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2212 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2217 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2218 CLOCK_CTRL_TXCLK_DISABLE |
2219 CLOCK_CTRL_44MHZ_CORE);
2221 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2224 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2225 tp->pci_clock_ctrl | newbits3, 40);
2229 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
2230 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2231 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2232 tg3_power_down_phy(tp);
2234 tg3_frob_aux_power(tp);
2236 /* Workaround for unstable PLL clock */
2237 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2238 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2239 u32 val = tr32(0x7d00);
2241 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2243 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2246 err = tg3_nvram_lock(tp);
2247 tg3_halt_cpu(tp, RX_CPU_BASE);
2249 tg3_nvram_unlock(tp);
2253 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2255 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2256 pci_enable_wake(tp->pdev, state, true);
2258 /* Finally, set the new power state. */
2259 pci_set_power_state(tp->pdev, state);
2264 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2266 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2267 case MII_TG3_AUX_STAT_10HALF:
2269 *duplex = DUPLEX_HALF;
2272 case MII_TG3_AUX_STAT_10FULL:
2274 *duplex = DUPLEX_FULL;
2277 case MII_TG3_AUX_STAT_100HALF:
2279 *duplex = DUPLEX_HALF;
2282 case MII_TG3_AUX_STAT_100FULL:
2284 *duplex = DUPLEX_FULL;
2287 case MII_TG3_AUX_STAT_1000HALF:
2288 *speed = SPEED_1000;
2289 *duplex = DUPLEX_HALF;
2292 case MII_TG3_AUX_STAT_1000FULL:
2293 *speed = SPEED_1000;
2294 *duplex = DUPLEX_FULL;
2298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2299 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2301 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2305 *speed = SPEED_INVALID;
2306 *duplex = DUPLEX_INVALID;
2311 static void tg3_phy_copper_begin(struct tg3 *tp)
2316 if (tp->link_config.phy_is_low_power) {
2317 /* Entering low power mode. Disable gigabit and
2318 * 100baseT advertisements.
2320 tg3_writephy(tp, MII_TG3_CTRL, 0);
2322 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2323 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2324 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2325 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2327 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2328 } else if (tp->link_config.speed == SPEED_INVALID) {
2329 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2330 tp->link_config.advertising &=
2331 ~(ADVERTISED_1000baseT_Half |
2332 ADVERTISED_1000baseT_Full);
2334 new_adv = ADVERTISE_CSMA;
2335 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2336 new_adv |= ADVERTISE_10HALF;
2337 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2338 new_adv |= ADVERTISE_10FULL;
2339 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2340 new_adv |= ADVERTISE_100HALF;
2341 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2342 new_adv |= ADVERTISE_100FULL;
2344 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2346 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2348 if (tp->link_config.advertising &
2349 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2351 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2352 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2353 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2354 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2355 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2356 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2357 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2358 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2359 MII_TG3_CTRL_ENABLE_AS_MASTER);
2360 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2362 tg3_writephy(tp, MII_TG3_CTRL, 0);
2365 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2366 new_adv |= ADVERTISE_CSMA;
2368 /* Asking for a specific link mode. */
2369 if (tp->link_config.speed == SPEED_1000) {
2370 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2372 if (tp->link_config.duplex == DUPLEX_FULL)
2373 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2375 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2376 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2377 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2378 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2379 MII_TG3_CTRL_ENABLE_AS_MASTER);
2381 if (tp->link_config.speed == SPEED_100) {
2382 if (tp->link_config.duplex == DUPLEX_FULL)
2383 new_adv |= ADVERTISE_100FULL;
2385 new_adv |= ADVERTISE_100HALF;
2387 if (tp->link_config.duplex == DUPLEX_FULL)
2388 new_adv |= ADVERTISE_10FULL;
2390 new_adv |= ADVERTISE_10HALF;
2392 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2397 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2400 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2401 tp->link_config.speed != SPEED_INVALID) {
2402 u32 bmcr, orig_bmcr;
2404 tp->link_config.active_speed = tp->link_config.speed;
2405 tp->link_config.active_duplex = tp->link_config.duplex;
2408 switch (tp->link_config.speed) {
2414 bmcr |= BMCR_SPEED100;
2418 bmcr |= TG3_BMCR_SPEED1000;
2422 if (tp->link_config.duplex == DUPLEX_FULL)
2423 bmcr |= BMCR_FULLDPLX;
2425 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2426 (bmcr != orig_bmcr)) {
2427 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2428 for (i = 0; i < 1500; i++) {
2432 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2433 tg3_readphy(tp, MII_BMSR, &tmp))
2435 if (!(tmp & BMSR_LSTATUS)) {
2440 tg3_writephy(tp, MII_BMCR, bmcr);
2444 tg3_writephy(tp, MII_BMCR,
2445 BMCR_ANENABLE | BMCR_ANRESTART);
2449 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2453 /* Turn off tap power management. */
2454 /* Set Extended packet length bit */
2455 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2457 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2458 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2460 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2461 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2463 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2464 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2466 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2467 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2469 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2470 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2477 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2479 u32 adv_reg, all_mask = 0;
2481 if (mask & ADVERTISED_10baseT_Half)
2482 all_mask |= ADVERTISE_10HALF;
2483 if (mask & ADVERTISED_10baseT_Full)
2484 all_mask |= ADVERTISE_10FULL;
2485 if (mask & ADVERTISED_100baseT_Half)
2486 all_mask |= ADVERTISE_100HALF;
2487 if (mask & ADVERTISED_100baseT_Full)
2488 all_mask |= ADVERTISE_100FULL;
2490 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2493 if ((adv_reg & all_mask) != all_mask)
2495 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2499 if (mask & ADVERTISED_1000baseT_Half)
2500 all_mask |= ADVERTISE_1000HALF;
2501 if (mask & ADVERTISED_1000baseT_Full)
2502 all_mask |= ADVERTISE_1000FULL;
2504 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2507 if ((tg3_ctrl & all_mask) != all_mask)
2513 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2517 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2520 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2521 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2523 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2524 if (curadv != reqadv)
2527 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2528 tg3_readphy(tp, MII_LPA, rmtadv);
2530 /* Reprogram the advertisement register, even if it
2531 * does not affect the current link. If the link
2532 * gets renegotiated in the future, we can save an
2533 * additional renegotiation cycle by advertising
2534 * it correctly in the first place.
2536 if (curadv != reqadv) {
2537 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2538 ADVERTISE_PAUSE_ASYM);
2539 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2546 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2548 int current_link_up;
2550 u32 lcl_adv, rmt_adv;
2558 (MAC_STATUS_SYNC_CHANGED |
2559 MAC_STATUS_CFG_CHANGED |
2560 MAC_STATUS_MI_COMPLETION |
2561 MAC_STATUS_LNKSTATE_CHANGED));
2564 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2566 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2570 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2572 /* Some third-party PHYs need to be reset on link going
2575 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2577 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2578 netif_carrier_ok(tp->dev)) {
2579 tg3_readphy(tp, MII_BMSR, &bmsr);
2580 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2581 !(bmsr & BMSR_LSTATUS))
2587 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2588 tg3_readphy(tp, MII_BMSR, &bmsr);
2589 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2590 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2593 if (!(bmsr & BMSR_LSTATUS)) {
2594 err = tg3_init_5401phy_dsp(tp);
2598 tg3_readphy(tp, MII_BMSR, &bmsr);
2599 for (i = 0; i < 1000; i++) {
2601 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2602 (bmsr & BMSR_LSTATUS)) {
2608 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2609 !(bmsr & BMSR_LSTATUS) &&
2610 tp->link_config.active_speed == SPEED_1000) {
2611 err = tg3_phy_reset(tp);
2613 err = tg3_init_5401phy_dsp(tp);
2618 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2619 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2620 /* 5701 {A0,B0} CRC bug workaround */
2621 tg3_writephy(tp, 0x15, 0x0a75);
2622 tg3_writephy(tp, 0x1c, 0x8c68);
2623 tg3_writephy(tp, 0x1c, 0x8d68);
2624 tg3_writephy(tp, 0x1c, 0x8c68);
2627 /* Clear pending interrupts... */
2628 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2629 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2631 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2632 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2633 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2634 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2638 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2639 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2640 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2642 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2645 current_link_up = 0;
2646 current_speed = SPEED_INVALID;
2647 current_duplex = DUPLEX_INVALID;
2649 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2652 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2653 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2654 if (!(val & (1 << 10))) {
2656 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2662 for (i = 0; i < 100; i++) {
2663 tg3_readphy(tp, MII_BMSR, &bmsr);
2664 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2665 (bmsr & BMSR_LSTATUS))
2670 if (bmsr & BMSR_LSTATUS) {
2673 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2674 for (i = 0; i < 2000; i++) {
2676 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2681 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2686 for (i = 0; i < 200; i++) {
2687 tg3_readphy(tp, MII_BMCR, &bmcr);
2688 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2690 if (bmcr && bmcr != 0x7fff)
2698 tp->link_config.active_speed = current_speed;
2699 tp->link_config.active_duplex = current_duplex;
2701 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2702 if ((bmcr & BMCR_ANENABLE) &&
2703 tg3_copper_is_advertising_all(tp,
2704 tp->link_config.advertising)) {
2705 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2707 current_link_up = 1;
2710 if (!(bmcr & BMCR_ANENABLE) &&
2711 tp->link_config.speed == current_speed &&
2712 tp->link_config.duplex == current_duplex &&
2713 tp->link_config.flowctrl ==
2714 tp->link_config.active_flowctrl) {
2715 current_link_up = 1;
2719 if (current_link_up == 1 &&
2720 tp->link_config.active_duplex == DUPLEX_FULL)
2721 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2725 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2728 tg3_phy_copper_begin(tp);
2730 tg3_readphy(tp, MII_BMSR, &tmp);
2731 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2732 (tmp & BMSR_LSTATUS))
2733 current_link_up = 1;
2736 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2737 if (current_link_up == 1) {
2738 if (tp->link_config.active_speed == SPEED_100 ||
2739 tp->link_config.active_speed == SPEED_10)
2740 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2742 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2744 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2746 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2747 if (tp->link_config.active_duplex == DUPLEX_HALF)
2748 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2751 if (current_link_up == 1 &&
2752 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2753 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2755 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2758 /* ??? Without this setting Netgear GA302T PHY does not
2759 * ??? send/receive packets...
2761 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2762 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2763 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2764 tw32_f(MAC_MI_MODE, tp->mi_mode);
2768 tw32_f(MAC_MODE, tp->mac_mode);
2771 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2772 /* Polled via timer. */
2773 tw32_f(MAC_EVENT, 0);
2775 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2780 current_link_up == 1 &&
2781 tp->link_config.active_speed == SPEED_1000 &&
2782 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2783 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2786 (MAC_STATUS_SYNC_CHANGED |
2787 MAC_STATUS_CFG_CHANGED));
2790 NIC_SRAM_FIRMWARE_MBOX,
2791 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2794 if (current_link_up != netif_carrier_ok(tp->dev)) {
2795 if (current_link_up)
2796 netif_carrier_on(tp->dev);
2798 netif_carrier_off(tp->dev);
2799 tg3_link_report(tp);
2805 struct tg3_fiber_aneginfo {
2807 #define ANEG_STATE_UNKNOWN 0
2808 #define ANEG_STATE_AN_ENABLE 1
2809 #define ANEG_STATE_RESTART_INIT 2
2810 #define ANEG_STATE_RESTART 3
2811 #define ANEG_STATE_DISABLE_LINK_OK 4
2812 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2813 #define ANEG_STATE_ABILITY_DETECT 6
2814 #define ANEG_STATE_ACK_DETECT_INIT 7
2815 #define ANEG_STATE_ACK_DETECT 8
2816 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2817 #define ANEG_STATE_COMPLETE_ACK 10
2818 #define ANEG_STATE_IDLE_DETECT_INIT 11
2819 #define ANEG_STATE_IDLE_DETECT 12
2820 #define ANEG_STATE_LINK_OK 13
2821 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2822 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2825 #define MR_AN_ENABLE 0x00000001
2826 #define MR_RESTART_AN 0x00000002
2827 #define MR_AN_COMPLETE 0x00000004
2828 #define MR_PAGE_RX 0x00000008
2829 #define MR_NP_LOADED 0x00000010
2830 #define MR_TOGGLE_TX 0x00000020
2831 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2832 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2833 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2834 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2835 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2836 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2837 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2838 #define MR_TOGGLE_RX 0x00002000
2839 #define MR_NP_RX 0x00004000
2841 #define MR_LINK_OK 0x80000000
2843 unsigned long link_time, cur_time;
2845 u32 ability_match_cfg;
2846 int ability_match_count;
2848 char ability_match, idle_match, ack_match;
2850 u32 txconfig, rxconfig;
2851 #define ANEG_CFG_NP 0x00000080
2852 #define ANEG_CFG_ACK 0x00000040
2853 #define ANEG_CFG_RF2 0x00000020
2854 #define ANEG_CFG_RF1 0x00000010
2855 #define ANEG_CFG_PS2 0x00000001
2856 #define ANEG_CFG_PS1 0x00008000
2857 #define ANEG_CFG_HD 0x00004000
2858 #define ANEG_CFG_FD 0x00002000
2859 #define ANEG_CFG_INVAL 0x00001f06
2864 #define ANEG_TIMER_ENAB 2
2865 #define ANEG_FAILED -1
2867 #define ANEG_STATE_SETTLE_TIME 10000
2869 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2870 struct tg3_fiber_aneginfo *ap)
2873 unsigned long delta;
2877 if (ap->state == ANEG_STATE_UNKNOWN) {
2881 ap->ability_match_cfg = 0;
2882 ap->ability_match_count = 0;
2883 ap->ability_match = 0;
2889 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2890 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2892 if (rx_cfg_reg != ap->ability_match_cfg) {
2893 ap->ability_match_cfg = rx_cfg_reg;
2894 ap->ability_match = 0;
2895 ap->ability_match_count = 0;
2897 if (++ap->ability_match_count > 1) {
2898 ap->ability_match = 1;
2899 ap->ability_match_cfg = rx_cfg_reg;
2902 if (rx_cfg_reg & ANEG_CFG_ACK)
2910 ap->ability_match_cfg = 0;
2911 ap->ability_match_count = 0;
2912 ap->ability_match = 0;
2918 ap->rxconfig = rx_cfg_reg;
2922 case ANEG_STATE_UNKNOWN:
2923 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2924 ap->state = ANEG_STATE_AN_ENABLE;
2927 case ANEG_STATE_AN_ENABLE:
2928 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2929 if (ap->flags & MR_AN_ENABLE) {
2932 ap->ability_match_cfg = 0;
2933 ap->ability_match_count = 0;
2934 ap->ability_match = 0;
2938 ap->state = ANEG_STATE_RESTART_INIT;
2940 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2944 case ANEG_STATE_RESTART_INIT:
2945 ap->link_time = ap->cur_time;
2946 ap->flags &= ~(MR_NP_LOADED);
2948 tw32(MAC_TX_AUTO_NEG, 0);
2949 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2950 tw32_f(MAC_MODE, tp->mac_mode);
2953 ret = ANEG_TIMER_ENAB;
2954 ap->state = ANEG_STATE_RESTART;
2957 case ANEG_STATE_RESTART:
2958 delta = ap->cur_time - ap->link_time;
2959 if (delta > ANEG_STATE_SETTLE_TIME) {
2960 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2962 ret = ANEG_TIMER_ENAB;
2966 case ANEG_STATE_DISABLE_LINK_OK:
2970 case ANEG_STATE_ABILITY_DETECT_INIT:
2971 ap->flags &= ~(MR_TOGGLE_TX);
2972 ap->txconfig = ANEG_CFG_FD;
2973 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2974 if (flowctrl & ADVERTISE_1000XPAUSE)
2975 ap->txconfig |= ANEG_CFG_PS1;
2976 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2977 ap->txconfig |= ANEG_CFG_PS2;
2978 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2979 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2980 tw32_f(MAC_MODE, tp->mac_mode);
2983 ap->state = ANEG_STATE_ABILITY_DETECT;
2986 case ANEG_STATE_ABILITY_DETECT:
2987 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2988 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2992 case ANEG_STATE_ACK_DETECT_INIT:
2993 ap->txconfig |= ANEG_CFG_ACK;
2994 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2995 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2996 tw32_f(MAC_MODE, tp->mac_mode);
2999 ap->state = ANEG_STATE_ACK_DETECT;
3002 case ANEG_STATE_ACK_DETECT:
3003 if (ap->ack_match != 0) {
3004 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3005 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3006 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3008 ap->state = ANEG_STATE_AN_ENABLE;
3010 } else if (ap->ability_match != 0 &&
3011 ap->rxconfig == 0) {
3012 ap->state = ANEG_STATE_AN_ENABLE;
3016 case ANEG_STATE_COMPLETE_ACK_INIT:
3017 if (ap->rxconfig & ANEG_CFG_INVAL) {
3021 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3022 MR_LP_ADV_HALF_DUPLEX |
3023 MR_LP_ADV_SYM_PAUSE |
3024 MR_LP_ADV_ASYM_PAUSE |
3025 MR_LP_ADV_REMOTE_FAULT1 |
3026 MR_LP_ADV_REMOTE_FAULT2 |
3027 MR_LP_ADV_NEXT_PAGE |
3030 if (ap->rxconfig & ANEG_CFG_FD)
3031 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3032 if (ap->rxconfig & ANEG_CFG_HD)
3033 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3034 if (ap->rxconfig & ANEG_CFG_PS1)
3035 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3036 if (ap->rxconfig & ANEG_CFG_PS2)
3037 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3038 if (ap->rxconfig & ANEG_CFG_RF1)
3039 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3040 if (ap->rxconfig & ANEG_CFG_RF2)
3041 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3042 if (ap->rxconfig & ANEG_CFG_NP)
3043 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3045 ap->link_time = ap->cur_time;
3047 ap->flags ^= (MR_TOGGLE_TX);
3048 if (ap->rxconfig & 0x0008)
3049 ap->flags |= MR_TOGGLE_RX;
3050 if (ap->rxconfig & ANEG_CFG_NP)
3051 ap->flags |= MR_NP_RX;
3052 ap->flags |= MR_PAGE_RX;
3054 ap->state = ANEG_STATE_COMPLETE_ACK;
3055 ret = ANEG_TIMER_ENAB;
3058 case ANEG_STATE_COMPLETE_ACK:
3059 if (ap->ability_match != 0 &&
3060 ap->rxconfig == 0) {
3061 ap->state = ANEG_STATE_AN_ENABLE;
3064 delta = ap->cur_time - ap->link_time;
3065 if (delta > ANEG_STATE_SETTLE_TIME) {
3066 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3067 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3069 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3070 !(ap->flags & MR_NP_RX)) {
3071 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3079 case ANEG_STATE_IDLE_DETECT_INIT:
3080 ap->link_time = ap->cur_time;
3081 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3082 tw32_f(MAC_MODE, tp->mac_mode);
3085 ap->state = ANEG_STATE_IDLE_DETECT;
3086 ret = ANEG_TIMER_ENAB;
3089 case ANEG_STATE_IDLE_DETECT:
3090 if (ap->ability_match != 0 &&
3091 ap->rxconfig == 0) {
3092 ap->state = ANEG_STATE_AN_ENABLE;
3095 delta = ap->cur_time - ap->link_time;
3096 if (delta > ANEG_STATE_SETTLE_TIME) {
3097 /* XXX another gem from the Broadcom driver :( */
3098 ap->state = ANEG_STATE_LINK_OK;
3102 case ANEG_STATE_LINK_OK:
3103 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3107 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3108 /* ??? unimplemented */
3111 case ANEG_STATE_NEXT_PAGE_WAIT:
3112 /* ??? unimplemented */
3123 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3126 struct tg3_fiber_aneginfo aninfo;
3127 int status = ANEG_FAILED;
3131 tw32_f(MAC_TX_AUTO_NEG, 0);
3133 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3134 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3137 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3140 memset(&aninfo, 0, sizeof(aninfo));
3141 aninfo.flags |= MR_AN_ENABLE;
3142 aninfo.state = ANEG_STATE_UNKNOWN;
3143 aninfo.cur_time = 0;
3145 while (++tick < 195000) {
3146 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3147 if (status == ANEG_DONE || status == ANEG_FAILED)
3153 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3154 tw32_f(MAC_MODE, tp->mac_mode);
3157 *txflags = aninfo.txconfig;
3158 *rxflags = aninfo.flags;
3160 if (status == ANEG_DONE &&
3161 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3162 MR_LP_ADV_FULL_DUPLEX)))
3168 static void tg3_init_bcm8002(struct tg3 *tp)
3170 u32 mac_status = tr32(MAC_STATUS);
3173 /* Reset when initting first time or we have a link. */
3174 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3175 !(mac_status & MAC_STATUS_PCS_SYNCED))
3178 /* Set PLL lock range. */
3179 tg3_writephy(tp, 0x16, 0x8007);
3182 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3184 /* Wait for reset to complete. */
3185 /* XXX schedule_timeout() ... */
3186 for (i = 0; i < 500; i++)
3189 /* Config mode; select PMA/Ch 1 regs. */
3190 tg3_writephy(tp, 0x10, 0x8411);
3192 /* Enable auto-lock and comdet, select txclk for tx. */
3193 tg3_writephy(tp, 0x11, 0x0a10);
3195 tg3_writephy(tp, 0x18, 0x00a0);
3196 tg3_writephy(tp, 0x16, 0x41ff);
3198 /* Assert and deassert POR. */
3199 tg3_writephy(tp, 0x13, 0x0400);
3201 tg3_writephy(tp, 0x13, 0x0000);
3203 tg3_writephy(tp, 0x11, 0x0a50);
3205 tg3_writephy(tp, 0x11, 0x0a10);
3207 /* Wait for signal to stabilize */
3208 /* XXX schedule_timeout() ... */
3209 for (i = 0; i < 15000; i++)
3212 /* Deselect the channel register so we can read the PHYID
3215 tg3_writephy(tp, 0x10, 0x8011);
3218 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3221 u32 sg_dig_ctrl, sg_dig_status;
3222 u32 serdes_cfg, expected_sg_dig_ctrl;
3223 int workaround, port_a;
3224 int current_link_up;
3227 expected_sg_dig_ctrl = 0;
3230 current_link_up = 0;
3232 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3233 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3235 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3238 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3239 /* preserve bits 20-23 for voltage regulator */
3240 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3243 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3245 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3246 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3248 u32 val = serdes_cfg;
3254 tw32_f(MAC_SERDES_CFG, val);
3257 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3259 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3260 tg3_setup_flow_control(tp, 0, 0);
3261 current_link_up = 1;
3266 /* Want auto-negotiation. */
3267 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3269 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3270 if (flowctrl & ADVERTISE_1000XPAUSE)
3271 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3272 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3273 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3275 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3276 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3277 tp->serdes_counter &&
3278 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3279 MAC_STATUS_RCVD_CFG)) ==
3280 MAC_STATUS_PCS_SYNCED)) {
3281 tp->serdes_counter--;
3282 current_link_up = 1;
3287 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3288 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3290 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3292 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3293 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3294 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3295 MAC_STATUS_SIGNAL_DET)) {
3296 sg_dig_status = tr32(SG_DIG_STATUS);
3297 mac_status = tr32(MAC_STATUS);
3299 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3300 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3301 u32 local_adv = 0, remote_adv = 0;
3303 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3304 local_adv |= ADVERTISE_1000XPAUSE;
3305 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3306 local_adv |= ADVERTISE_1000XPSE_ASYM;
3308 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3309 remote_adv |= LPA_1000XPAUSE;
3310 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3311 remote_adv |= LPA_1000XPAUSE_ASYM;
3313 tg3_setup_flow_control(tp, local_adv, remote_adv);
3314 current_link_up = 1;
3315 tp->serdes_counter = 0;
3316 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3317 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3318 if (tp->serdes_counter)
3319 tp->serdes_counter--;
3322 u32 val = serdes_cfg;
3329 tw32_f(MAC_SERDES_CFG, val);
3332 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3335 /* Link parallel detection - link is up */
3336 /* only if we have PCS_SYNC and not */
3337 /* receiving config code words */
3338 mac_status = tr32(MAC_STATUS);
3339 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3340 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3341 tg3_setup_flow_control(tp, 0, 0);
3342 current_link_up = 1;
3344 TG3_FLG2_PARALLEL_DETECT;
3345 tp->serdes_counter =
3346 SERDES_PARALLEL_DET_TIMEOUT;
3348 goto restart_autoneg;
3352 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3353 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3357 return current_link_up;
3360 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3362 int current_link_up = 0;
3364 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3367 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3368 u32 txflags, rxflags;
3371 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3372 u32 local_adv = 0, remote_adv = 0;
3374 if (txflags & ANEG_CFG_PS1)
3375 local_adv |= ADVERTISE_1000XPAUSE;
3376 if (txflags & ANEG_CFG_PS2)
3377 local_adv |= ADVERTISE_1000XPSE_ASYM;
3379 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3380 remote_adv |= LPA_1000XPAUSE;
3381 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3382 remote_adv |= LPA_1000XPAUSE_ASYM;
3384 tg3_setup_flow_control(tp, local_adv, remote_adv);
3386 current_link_up = 1;
3388 for (i = 0; i < 30; i++) {
3391 (MAC_STATUS_SYNC_CHANGED |
3392 MAC_STATUS_CFG_CHANGED));
3394 if ((tr32(MAC_STATUS) &
3395 (MAC_STATUS_SYNC_CHANGED |
3396 MAC_STATUS_CFG_CHANGED)) == 0)
3400 mac_status = tr32(MAC_STATUS);
3401 if (current_link_up == 0 &&
3402 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3403 !(mac_status & MAC_STATUS_RCVD_CFG))
3404 current_link_up = 1;
3406 tg3_setup_flow_control(tp, 0, 0);
3408 /* Forcing 1000FD link up. */
3409 current_link_up = 1;
3411 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3414 tw32_f(MAC_MODE, tp->mac_mode);
3419 return current_link_up;
3422 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3425 u16 orig_active_speed;
3426 u8 orig_active_duplex;
3428 int current_link_up;
3431 orig_pause_cfg = tp->link_config.active_flowctrl;
3432 orig_active_speed = tp->link_config.active_speed;
3433 orig_active_duplex = tp->link_config.active_duplex;
3435 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3436 netif_carrier_ok(tp->dev) &&
3437 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3438 mac_status = tr32(MAC_STATUS);
3439 mac_status &= (MAC_STATUS_PCS_SYNCED |
3440 MAC_STATUS_SIGNAL_DET |
3441 MAC_STATUS_CFG_CHANGED |
3442 MAC_STATUS_RCVD_CFG);
3443 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3444 MAC_STATUS_SIGNAL_DET)) {
3445 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3446 MAC_STATUS_CFG_CHANGED));
3451 tw32_f(MAC_TX_AUTO_NEG, 0);
3453 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3454 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3455 tw32_f(MAC_MODE, tp->mac_mode);
3458 if (tp->phy_id == PHY_ID_BCM8002)
3459 tg3_init_bcm8002(tp);
3461 /* Enable link change event even when serdes polling. */
3462 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3465 current_link_up = 0;
3466 mac_status = tr32(MAC_STATUS);
3468 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3469 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3471 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3473 tp->hw_status->status =
3474 (SD_STATUS_UPDATED |
3475 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3477 for (i = 0; i < 100; i++) {
3478 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3479 MAC_STATUS_CFG_CHANGED));
3481 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3482 MAC_STATUS_CFG_CHANGED |
3483 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3487 mac_status = tr32(MAC_STATUS);
3488 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3489 current_link_up = 0;
3490 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3491 tp->serdes_counter == 0) {
3492 tw32_f(MAC_MODE, (tp->mac_mode |
3493 MAC_MODE_SEND_CONFIGS));
3495 tw32_f(MAC_MODE, tp->mac_mode);
3499 if (current_link_up == 1) {
3500 tp->link_config.active_speed = SPEED_1000;
3501 tp->link_config.active_duplex = DUPLEX_FULL;
3502 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3503 LED_CTRL_LNKLED_OVERRIDE |
3504 LED_CTRL_1000MBPS_ON));
3506 tp->link_config.active_speed = SPEED_INVALID;
3507 tp->link_config.active_duplex = DUPLEX_INVALID;
3508 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3509 LED_CTRL_LNKLED_OVERRIDE |
3510 LED_CTRL_TRAFFIC_OVERRIDE));
3513 if (current_link_up != netif_carrier_ok(tp->dev)) {
3514 if (current_link_up)
3515 netif_carrier_on(tp->dev);
3517 netif_carrier_off(tp->dev);
3518 tg3_link_report(tp);
3520 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3521 if (orig_pause_cfg != now_pause_cfg ||
3522 orig_active_speed != tp->link_config.active_speed ||
3523 orig_active_duplex != tp->link_config.active_duplex)
3524 tg3_link_report(tp);
3530 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3532 int current_link_up, err = 0;
3536 u32 local_adv, remote_adv;
3538 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3539 tw32_f(MAC_MODE, tp->mac_mode);
3545 (MAC_STATUS_SYNC_CHANGED |
3546 MAC_STATUS_CFG_CHANGED |
3547 MAC_STATUS_MI_COMPLETION |
3548 MAC_STATUS_LNKSTATE_CHANGED));
3554 current_link_up = 0;
3555 current_speed = SPEED_INVALID;
3556 current_duplex = DUPLEX_INVALID;
3558 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3559 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3561 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3562 bmsr |= BMSR_LSTATUS;
3564 bmsr &= ~BMSR_LSTATUS;
3567 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3569 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3570 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3571 /* do nothing, just check for link up at the end */
3572 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3575 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3576 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3577 ADVERTISE_1000XPAUSE |
3578 ADVERTISE_1000XPSE_ASYM |
3581 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3583 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3584 new_adv |= ADVERTISE_1000XHALF;
3585 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3586 new_adv |= ADVERTISE_1000XFULL;
3588 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3589 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3590 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3591 tg3_writephy(tp, MII_BMCR, bmcr);
3593 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3594 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3595 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3602 bmcr &= ~BMCR_SPEED1000;
3603 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3605 if (tp->link_config.duplex == DUPLEX_FULL)
3606 new_bmcr |= BMCR_FULLDPLX;
3608 if (new_bmcr != bmcr) {
3609 /* BMCR_SPEED1000 is a reserved bit that needs
3610 * to be set on write.
3612 new_bmcr |= BMCR_SPEED1000;
3614 /* Force a linkdown */
3615 if (netif_carrier_ok(tp->dev)) {
3618 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3619 adv &= ~(ADVERTISE_1000XFULL |
3620 ADVERTISE_1000XHALF |
3622 tg3_writephy(tp, MII_ADVERTISE, adv);
3623 tg3_writephy(tp, MII_BMCR, bmcr |
3627 netif_carrier_off(tp->dev);
3629 tg3_writephy(tp, MII_BMCR, new_bmcr);
3631 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3632 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3633 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3635 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3636 bmsr |= BMSR_LSTATUS;
3638 bmsr &= ~BMSR_LSTATUS;
3640 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3644 if (bmsr & BMSR_LSTATUS) {
3645 current_speed = SPEED_1000;
3646 current_link_up = 1;
3647 if (bmcr & BMCR_FULLDPLX)
3648 current_duplex = DUPLEX_FULL;
3650 current_duplex = DUPLEX_HALF;
3655 if (bmcr & BMCR_ANENABLE) {
3658 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3659 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3660 common = local_adv & remote_adv;
3661 if (common & (ADVERTISE_1000XHALF |
3662 ADVERTISE_1000XFULL)) {
3663 if (common & ADVERTISE_1000XFULL)
3664 current_duplex = DUPLEX_FULL;
3666 current_duplex = DUPLEX_HALF;
3669 current_link_up = 0;
3673 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3674 tg3_setup_flow_control(tp, local_adv, remote_adv);
3676 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3677 if (tp->link_config.active_duplex == DUPLEX_HALF)
3678 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3680 tw32_f(MAC_MODE, tp->mac_mode);
3683 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3685 tp->link_config.active_speed = current_speed;
3686 tp->link_config.active_duplex = current_duplex;
3688 if (current_link_up != netif_carrier_ok(tp->dev)) {
3689 if (current_link_up)
3690 netif_carrier_on(tp->dev);
3692 netif_carrier_off(tp->dev);
3693 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3695 tg3_link_report(tp);
3700 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3702 if (tp->serdes_counter) {
3703 /* Give autoneg time to complete. */
3704 tp->serdes_counter--;
3707 if (!netif_carrier_ok(tp->dev) &&
3708 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3711 tg3_readphy(tp, MII_BMCR, &bmcr);
3712 if (bmcr & BMCR_ANENABLE) {
3715 /* Select shadow register 0x1f */
3716 tg3_writephy(tp, 0x1c, 0x7c00);
3717 tg3_readphy(tp, 0x1c, &phy1);
3719 /* Select expansion interrupt status register */
3720 tg3_writephy(tp, 0x17, 0x0f01);
3721 tg3_readphy(tp, 0x15, &phy2);
3722 tg3_readphy(tp, 0x15, &phy2);
3724 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3725 /* We have signal detect and not receiving
3726 * config code words, link is up by parallel
3730 bmcr &= ~BMCR_ANENABLE;
3731 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3732 tg3_writephy(tp, MII_BMCR, bmcr);
3733 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3737 else if (netif_carrier_ok(tp->dev) &&
3738 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3739 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3742 /* Select expansion interrupt status register */
3743 tg3_writephy(tp, 0x17, 0x0f01);
3744 tg3_readphy(tp, 0x15, &phy2);
3748 /* Config code words received, turn on autoneg. */
3749 tg3_readphy(tp, MII_BMCR, &bmcr);
3750 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3752 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3758 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3762 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3763 err = tg3_setup_fiber_phy(tp, force_reset);
3764 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3765 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3767 err = tg3_setup_copper_phy(tp, force_reset);
3770 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3771 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3774 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3775 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3777 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3782 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3783 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3784 tw32(GRC_MISC_CFG, val);
3787 if (tp->link_config.active_speed == SPEED_1000 &&
3788 tp->link_config.active_duplex == DUPLEX_HALF)
3789 tw32(MAC_TX_LENGTHS,
3790 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3791 (6 << TX_LENGTHS_IPG_SHIFT) |
3792 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3794 tw32(MAC_TX_LENGTHS,
3795 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3796 (6 << TX_LENGTHS_IPG_SHIFT) |
3797 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3799 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3800 if (netif_carrier_ok(tp->dev)) {
3801 tw32(HOSTCC_STAT_COAL_TICKS,
3802 tp->coal.stats_block_coalesce_usecs);
3804 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3808 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3809 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3810 if (!netif_carrier_ok(tp->dev))
3811 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3814 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3815 tw32(PCIE_PWR_MGMT_THRESH, val);
3821 /* This is called whenever we suspect that the system chipset is re-
3822 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3823 * is bogus tx completions. We try to recover by setting the
3824 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3827 static void tg3_tx_recover(struct tg3 *tp)
3829 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3830 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3832 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3833 "mapped I/O cycles to the network device, attempting to "
3834 "recover. Please report the problem to the driver maintainer "
3835 "and include system chipset information.\n", tp->dev->name);
3837 spin_lock(&tp->lock);
3838 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3839 spin_unlock(&tp->lock);
3842 static inline u32 tg3_tx_avail(struct tg3 *tp)
3845 return (tp->tx_pending -
3846 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3849 /* Tigon3 never reports partial packet sends. So we do not
3850 * need special logic to handle SKBs that have not had all
3851 * of their frags sent yet, like SunGEM does.
3853 static void tg3_tx(struct tg3 *tp)
3855 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3856 u32 sw_idx = tp->tx_cons;
3858 while (sw_idx != hw_idx) {
3859 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3860 struct sk_buff *skb = ri->skb;
3863 if (unlikely(skb == NULL)) {
3868 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
3872 sw_idx = NEXT_TX(sw_idx);
3874 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3875 ri = &tp->tx_buffers[sw_idx];
3876 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3878 sw_idx = NEXT_TX(sw_idx);
3883 if (unlikely(tx_bug)) {
3889 tp->tx_cons = sw_idx;
3891 /* Need to make the tx_cons update visible to tg3_start_xmit()
3892 * before checking for netif_queue_stopped(). Without the
3893 * memory barrier, there is a small possibility that tg3_start_xmit()
3894 * will miss it and cause the queue to be stopped forever.
3898 if (unlikely(netif_queue_stopped(tp->dev) &&
3899 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3900 netif_tx_lock(tp->dev);
3901 if (netif_queue_stopped(tp->dev) &&
3902 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3903 netif_wake_queue(tp->dev);
3904 netif_tx_unlock(tp->dev);
3908 /* Returns size of skb allocated or < 0 on error.
3910 * We only need to fill in the address because the other members
3911 * of the RX descriptor are invariant, see tg3_init_rings.
3913 * Note the purposeful assymetry of cpu vs. chip accesses. For
3914 * posting buffers we only dirty the first cache line of the RX
3915 * descriptor (containing the address). Whereas for the RX status
3916 * buffers the cpu only reads the last cacheline of the RX descriptor
3917 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3919 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3920 int src_idx, u32 dest_idx_unmasked)
3922 struct tg3_rx_buffer_desc *desc;
3923 struct ring_info *map, *src_map;
3924 struct sk_buff *skb;
3926 int skb_size, dest_idx;
3929 switch (opaque_key) {
3930 case RXD_OPAQUE_RING_STD:
3931 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3932 desc = &tp->rx_std[dest_idx];
3933 map = &tp->rx_std_buffers[dest_idx];
3935 src_map = &tp->rx_std_buffers[src_idx];
3936 skb_size = tp->rx_pkt_buf_sz;
3939 case RXD_OPAQUE_RING_JUMBO:
3940 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3941 desc = &tp->rx_jumbo[dest_idx];
3942 map = &tp->rx_jumbo_buffers[dest_idx];
3944 src_map = &tp->rx_jumbo_buffers[src_idx];
3945 skb_size = RX_JUMBO_PKT_BUF_SZ;
3952 /* Do not overwrite any of the map or rp information
3953 * until we are sure we can commit to a new buffer.
3955 * Callers depend upon this behavior and assume that
3956 * we leave everything unchanged if we fail.
3958 skb = netdev_alloc_skb(tp->dev, skb_size);
3962 skb_reserve(skb, tp->rx_offset);
3964 mapping = pci_map_single(tp->pdev, skb->data,
3965 skb_size - tp->rx_offset,
3966 PCI_DMA_FROMDEVICE);
3969 pci_unmap_addr_set(map, mapping, mapping);
3971 if (src_map != NULL)
3972 src_map->skb = NULL;
3974 desc->addr_hi = ((u64)mapping >> 32);
3975 desc->addr_lo = ((u64)mapping & 0xffffffff);
3980 /* We only need to move over in the address because the other
3981 * members of the RX descriptor are invariant. See notes above
3982 * tg3_alloc_rx_skb for full details.
3984 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3985 int src_idx, u32 dest_idx_unmasked)
3987 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3988 struct ring_info *src_map, *dest_map;
3991 switch (opaque_key) {
3992 case RXD_OPAQUE_RING_STD:
3993 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3994 dest_desc = &tp->rx_std[dest_idx];
3995 dest_map = &tp->rx_std_buffers[dest_idx];
3996 src_desc = &tp->rx_std[src_idx];
3997 src_map = &tp->rx_std_buffers[src_idx];
4000 case RXD_OPAQUE_RING_JUMBO:
4001 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4002 dest_desc = &tp->rx_jumbo[dest_idx];
4003 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4004 src_desc = &tp->rx_jumbo[src_idx];
4005 src_map = &tp->rx_jumbo_buffers[src_idx];
4012 dest_map->skb = src_map->skb;
4013 pci_unmap_addr_set(dest_map, mapping,
4014 pci_unmap_addr(src_map, mapping));
4015 dest_desc->addr_hi = src_desc->addr_hi;
4016 dest_desc->addr_lo = src_desc->addr_lo;
4018 src_map->skb = NULL;
4021 #if TG3_VLAN_TAG_USED
4022 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4024 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4028 /* The RX ring scheme is composed of multiple rings which post fresh
4029 * buffers to the chip, and one special ring the chip uses to report
4030 * status back to the host.
4032 * The special ring reports the status of received packets to the
4033 * host. The chip does not write into the original descriptor the
4034 * RX buffer was obtained from. The chip simply takes the original
4035 * descriptor as provided by the host, updates the status and length
4036 * field, then writes this into the next status ring entry.
4038 * Each ring the host uses to post buffers to the chip is described
4039 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4040 * it is first placed into the on-chip ram. When the packet's length
4041 * is known, it walks down the TG3_BDINFO entries to select the ring.
4042 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4043 * which is within the range of the new packet's length is chosen.
4045 * The "separate ring for rx status" scheme may sound queer, but it makes
4046 * sense from a cache coherency perspective. If only the host writes
4047 * to the buffer post rings, and only the chip writes to the rx status
4048 * rings, then cache lines never move beyond shared-modified state.
4049 * If both the host and chip were to write into the same ring, cache line
4050 * eviction could occur since both entities want it in an exclusive state.
4052 static int tg3_rx(struct tg3 *tp, int budget)
4054 u32 work_mask, rx_std_posted = 0;
4055 u32 sw_idx = tp->rx_rcb_ptr;
4059 hw_idx = tp->hw_status->idx[0].rx_producer;
4061 * We need to order the read of hw_idx and the read of
4062 * the opaque cookie.
4067 while (sw_idx != hw_idx && budget > 0) {
4068 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4070 struct sk_buff *skb;
4071 dma_addr_t dma_addr;
4072 u32 opaque_key, desc_idx, *post_ptr;
4074 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4075 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4076 if (opaque_key == RXD_OPAQUE_RING_STD) {
4077 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4079 skb = tp->rx_std_buffers[desc_idx].skb;
4080 post_ptr = &tp->rx_std_ptr;
4082 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4083 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4085 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4086 post_ptr = &tp->rx_jumbo_ptr;
4089 goto next_pkt_nopost;
4092 work_mask |= opaque_key;
4094 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4095 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4097 tg3_recycle_rx(tp, opaque_key,
4098 desc_idx, *post_ptr);
4100 /* Other statistics kept track of by card. */
4101 tp->net_stats.rx_dropped++;
4105 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4107 if (len > RX_COPY_THRESHOLD
4108 && tp->rx_offset == 2
4109 /* rx_offset != 2 iff this is a 5701 card running
4110 * in PCI-X mode [see tg3_get_invariants()] */
4114 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4115 desc_idx, *post_ptr);
4119 pci_unmap_single(tp->pdev, dma_addr,
4120 skb_size - tp->rx_offset,
4121 PCI_DMA_FROMDEVICE);
4125 struct sk_buff *copy_skb;
4127 tg3_recycle_rx(tp, opaque_key,
4128 desc_idx, *post_ptr);
4130 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4131 if (copy_skb == NULL)
4132 goto drop_it_no_recycle;
4134 skb_reserve(copy_skb, 2);
4135 skb_put(copy_skb, len);
4136 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4137 skb_copy_from_linear_data(skb, copy_skb->data, len);
4138 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4140 /* We'll reuse the original ring buffer. */
4144 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4145 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4146 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4147 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4148 skb->ip_summed = CHECKSUM_UNNECESSARY;
4150 skb->ip_summed = CHECKSUM_NONE;
4152 skb->protocol = eth_type_trans(skb, tp->dev);
4153 #if TG3_VLAN_TAG_USED
4154 if (tp->vlgrp != NULL &&
4155 desc->type_flags & RXD_FLAG_VLAN) {
4156 tg3_vlan_rx(tp, skb,
4157 desc->err_vlan & RXD_VLAN_MASK);
4160 netif_receive_skb(skb);
4162 tp->dev->last_rx = jiffies;
4169 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4170 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4172 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4173 TG3_64BIT_REG_LOW, idx);
4174 work_mask &= ~RXD_OPAQUE_RING_STD;
4179 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4181 /* Refresh hw_idx to see if there is new work */
4182 if (sw_idx == hw_idx) {
4183 hw_idx = tp->hw_status->idx[0].rx_producer;
4188 /* ACK the status ring. */
4189 tp->rx_rcb_ptr = sw_idx;
4190 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4192 /* Refill RX ring(s). */
4193 if (work_mask & RXD_OPAQUE_RING_STD) {
4194 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4195 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4198 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4199 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4200 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4208 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4210 struct tg3_hw_status *sblk = tp->hw_status;
4212 /* handle link change and other phy events */
4213 if (!(tp->tg3_flags &
4214 (TG3_FLAG_USE_LINKCHG_REG |
4215 TG3_FLAG_POLL_SERDES))) {
4216 if (sblk->status & SD_STATUS_LINK_CHG) {
4217 sblk->status = SD_STATUS_UPDATED |
4218 (sblk->status & ~SD_STATUS_LINK_CHG);
4219 spin_lock(&tp->lock);
4220 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4222 (MAC_STATUS_SYNC_CHANGED |
4223 MAC_STATUS_CFG_CHANGED |
4224 MAC_STATUS_MI_COMPLETION |
4225 MAC_STATUS_LNKSTATE_CHANGED));
4228 tg3_setup_phy(tp, 0);
4229 spin_unlock(&tp->lock);
4233 /* run TX completion thread */
4234 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4236 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4240 /* run RX thread, within the bounds set by NAPI.
4241 * All RX "locking" is done by ensuring outside
4242 * code synchronizes with tg3->napi.poll()
4244 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4245 work_done += tg3_rx(tp, budget - work_done);
4250 static int tg3_poll(struct napi_struct *napi, int budget)
4252 struct tg3 *tp = container_of(napi, struct tg3, napi);
4254 struct tg3_hw_status *sblk = tp->hw_status;
4257 work_done = tg3_poll_work(tp, work_done, budget);
4259 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4262 if (unlikely(work_done >= budget))
4265 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4266 /* tp->last_tag is used in tg3_restart_ints() below
4267 * to tell the hw how much work has been processed,
4268 * so we must read it before checking for more work.
4270 tp->last_tag = sblk->status_tag;
4273 sblk->status &= ~SD_STATUS_UPDATED;
4275 if (likely(!tg3_has_work(tp))) {
4276 netif_rx_complete(tp->dev, napi);
4277 tg3_restart_ints(tp);
4285 /* work_done is guaranteed to be less than budget. */
4286 netif_rx_complete(tp->dev, napi);
4287 schedule_work(&tp->reset_task);
4291 static void tg3_irq_quiesce(struct tg3 *tp)
4293 BUG_ON(tp->irq_sync);
4298 synchronize_irq(tp->pdev->irq);
4301 static inline int tg3_irq_sync(struct tg3 *tp)
4303 return tp->irq_sync;
4306 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4307 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4308 * with as well. Most of the time, this is not necessary except when
4309 * shutting down the device.
4311 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4313 spin_lock_bh(&tp->lock);
4315 tg3_irq_quiesce(tp);
4318 static inline void tg3_full_unlock(struct tg3 *tp)
4320 spin_unlock_bh(&tp->lock);
4323 /* One-shot MSI handler - Chip automatically disables interrupt
4324 * after sending MSI so driver doesn't have to do it.
4326 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4328 struct net_device *dev = dev_id;
4329 struct tg3 *tp = netdev_priv(dev);
4331 prefetch(tp->hw_status);
4332 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4334 if (likely(!tg3_irq_sync(tp)))
4335 netif_rx_schedule(dev, &tp->napi);
4340 /* MSI ISR - No need to check for interrupt sharing and no need to
4341 * flush status block and interrupt mailbox. PCI ordering rules
4342 * guarantee that MSI will arrive after the status block.
4344 static irqreturn_t tg3_msi(int irq, void *dev_id)
4346 struct net_device *dev = dev_id;
4347 struct tg3 *tp = netdev_priv(dev);
4349 prefetch(tp->hw_status);
4350 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4352 * Writing any value to intr-mbox-0 clears PCI INTA# and
4353 * chip-internal interrupt pending events.
4354 * Writing non-zero to intr-mbox-0 additional tells the
4355 * NIC to stop sending us irqs, engaging "in-intr-handler"
4358 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4359 if (likely(!tg3_irq_sync(tp)))
4360 netif_rx_schedule(dev, &tp->napi);
4362 return IRQ_RETVAL(1);
4365 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4367 struct net_device *dev = dev_id;
4368 struct tg3 *tp = netdev_priv(dev);
4369 struct tg3_hw_status *sblk = tp->hw_status;
4370 unsigned int handled = 1;
4372 /* In INTx mode, it is possible for the interrupt to arrive at
4373 * the CPU before the status block posted prior to the interrupt.
4374 * Reading the PCI State register will confirm whether the
4375 * interrupt is ours and will flush the status block.
4377 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4378 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4379 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4386 * Writing any value to intr-mbox-0 clears PCI INTA# and
4387 * chip-internal interrupt pending events.
4388 * Writing non-zero to intr-mbox-0 additional tells the
4389 * NIC to stop sending us irqs, engaging "in-intr-handler"
4392 * Flush the mailbox to de-assert the IRQ immediately to prevent
4393 * spurious interrupts. The flush impacts performance but
4394 * excessive spurious interrupts can be worse in some cases.
4396 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4397 if (tg3_irq_sync(tp))
4399 sblk->status &= ~SD_STATUS_UPDATED;
4400 if (likely(tg3_has_work(tp))) {
4401 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4402 netif_rx_schedule(dev, &tp->napi);
4404 /* No work, shared interrupt perhaps? re-enable
4405 * interrupts, and flush that PCI write
4407 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4411 return IRQ_RETVAL(handled);
4414 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4416 struct net_device *dev = dev_id;
4417 struct tg3 *tp = netdev_priv(dev);
4418 struct tg3_hw_status *sblk = tp->hw_status;
4419 unsigned int handled = 1;
4421 /* In INTx mode, it is possible for the interrupt to arrive at
4422 * the CPU before the status block posted prior to the interrupt.
4423 * Reading the PCI State register will confirm whether the
4424 * interrupt is ours and will flush the status block.
4426 if (unlikely(sblk->status_tag == tp->last_tag)) {
4427 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4428 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4435 * writing any value to intr-mbox-0 clears PCI INTA# and
4436 * chip-internal interrupt pending events.
4437 * writing non-zero to intr-mbox-0 additional tells the
4438 * NIC to stop sending us irqs, engaging "in-intr-handler"
4441 * Flush the mailbox to de-assert the IRQ immediately to prevent
4442 * spurious interrupts. The flush impacts performance but
4443 * excessive spurious interrupts can be worse in some cases.
4445 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4446 if (tg3_irq_sync(tp))
4448 if (netif_rx_schedule_prep(dev, &tp->napi)) {
4449 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4450 /* Update last_tag to mark that this status has been
4451 * seen. Because interrupt may be shared, we may be
4452 * racing with tg3_poll(), so only update last_tag
4453 * if tg3_poll() is not scheduled.
4455 tp->last_tag = sblk->status_tag;
4456 __netif_rx_schedule(dev, &tp->napi);
4459 return IRQ_RETVAL(handled);
4462 /* ISR for interrupt test */
4463 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4465 struct net_device *dev = dev_id;
4466 struct tg3 *tp = netdev_priv(dev);
4467 struct tg3_hw_status *sblk = tp->hw_status;
4469 if ((sblk->status & SD_STATUS_UPDATED) ||
4470 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4471 tg3_disable_ints(tp);
4472 return IRQ_RETVAL(1);
4474 return IRQ_RETVAL(0);
4477 static int tg3_init_hw(struct tg3 *, int);
4478 static int tg3_halt(struct tg3 *, int, int);
4480 /* Restart hardware after configuration changes, self-test, etc.
4481 * Invoked with tp->lock held.
4483 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4484 __releases(tp->lock)
4485 __acquires(tp->lock)
4489 err = tg3_init_hw(tp, reset_phy);
4491 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4492 "aborting.\n", tp->dev->name);
4493 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4494 tg3_full_unlock(tp);
4495 del_timer_sync(&tp->timer);
4497 napi_enable(&tp->napi);
4499 tg3_full_lock(tp, 0);
4504 #ifdef CONFIG_NET_POLL_CONTROLLER
4505 static void tg3_poll_controller(struct net_device *dev)
4507 struct tg3 *tp = netdev_priv(dev);
4509 tg3_interrupt(tp->pdev->irq, dev);
4513 static void tg3_reset_task(struct work_struct *work)
4515 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4517 unsigned int restart_timer;
4519 tg3_full_lock(tp, 0);
4521 if (!netif_running(tp->dev)) {
4522 tg3_full_unlock(tp);
4526 tg3_full_unlock(tp);
4532 tg3_full_lock(tp, 1);
4534 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4535 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4537 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4538 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4539 tp->write32_rx_mbox = tg3_write_flush_reg32;
4540 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4541 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4544 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4545 err = tg3_init_hw(tp, 1);
4549 tg3_netif_start(tp);
4552 mod_timer(&tp->timer, jiffies + 1);
4555 tg3_full_unlock(tp);
4561 static void tg3_dump_short_state(struct tg3 *tp)
4563 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4564 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4565 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4566 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4569 static void tg3_tx_timeout(struct net_device *dev)
4571 struct tg3 *tp = netdev_priv(dev);
4573 if (netif_msg_tx_err(tp)) {
4574 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4576 tg3_dump_short_state(tp);
4579 schedule_work(&tp->reset_task);
4582 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4583 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4585 u32 base = (u32) mapping & 0xffffffff;
4587 return ((base > 0xffffdcc0) &&
4588 (base + len + 8 < base));
4591 /* Test for DMA addresses > 40-bit */
4592 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4595 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4596 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4597 return (((u64) mapping + len) > DMA_40BIT_MASK);
4604 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4606 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4607 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4608 u32 last_plus_one, u32 *start,
4609 u32 base_flags, u32 mss)
4611 struct sk_buff *new_skb;
4612 dma_addr_t new_addr = 0;
4616 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4617 new_skb = skb_copy(skb, GFP_ATOMIC);
4619 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4621 new_skb = skb_copy_expand(skb,
4622 skb_headroom(skb) + more_headroom,
4623 skb_tailroom(skb), GFP_ATOMIC);
4629 /* New SKB is guaranteed to be linear. */
4631 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4632 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4634 /* Make sure new skb does not cross any 4G boundaries.
4635 * Drop the packet if it does.
4637 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4639 skb_dma_unmap(&tp->pdev->dev, new_skb,
4642 dev_kfree_skb(new_skb);
4645 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4646 base_flags, 1 | (mss << 1));
4647 *start = NEXT_TX(entry);
4651 /* Now clean up the sw ring entries. */
4653 while (entry != last_plus_one) {
4655 tp->tx_buffers[entry].skb = new_skb;
4657 tp->tx_buffers[entry].skb = NULL;
4659 entry = NEXT_TX(entry);
4663 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4669 static void tg3_set_txd(struct tg3 *tp, int entry,
4670 dma_addr_t mapping, int len, u32 flags,
4673 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4674 int is_end = (mss_and_is_end & 0x1);
4675 u32 mss = (mss_and_is_end >> 1);
4679 flags |= TXD_FLAG_END;
4680 if (flags & TXD_FLAG_VLAN) {
4681 vlan_tag = flags >> 16;
4684 vlan_tag |= (mss << TXD_MSS_SHIFT);
4686 txd->addr_hi = ((u64) mapping >> 32);
4687 txd->addr_lo = ((u64) mapping & 0xffffffff);
4688 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4689 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4692 /* hard_start_xmit for devices that don't have any bugs and
4693 * support TG3_FLG2_HW_TSO_2 only.
4695 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4697 struct tg3 *tp = netdev_priv(dev);
4698 u32 len, entry, base_flags, mss;
4699 struct skb_shared_info *sp;
4702 len = skb_headlen(skb);
4704 /* We are running in BH disabled context with netif_tx_lock
4705 * and TX reclaim runs via tp->napi.poll inside of a software
4706 * interrupt. Furthermore, IRQ processing runs lockless so we have
4707 * no IRQ context deadlocks to worry about either. Rejoice!
4709 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4710 if (!netif_queue_stopped(dev)) {
4711 netif_stop_queue(dev);
4713 /* This is a hard error, log it. */
4714 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4715 "queue awake!\n", dev->name);
4717 return NETDEV_TX_BUSY;
4720 entry = tp->tx_prod;
4723 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4724 int tcp_opt_len, ip_tcp_len;
4726 if (skb_header_cloned(skb) &&
4727 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4732 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4733 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4735 struct iphdr *iph = ip_hdr(skb);
4737 tcp_opt_len = tcp_optlen(skb);
4738 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4741 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4742 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4745 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4746 TXD_FLAG_CPU_POST_DMA);
4748 tcp_hdr(skb)->check = 0;
4751 else if (skb->ip_summed == CHECKSUM_PARTIAL)
4752 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4753 #if TG3_VLAN_TAG_USED
4754 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4755 base_flags |= (TXD_FLAG_VLAN |
4756 (vlan_tx_tag_get(skb) << 16));
4759 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4764 sp = skb_shinfo(skb);
4766 mapping = sp->dma_maps[0];
4768 tp->tx_buffers[entry].skb = skb;
4770 tg3_set_txd(tp, entry, mapping, len, base_flags,
4771 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4773 entry = NEXT_TX(entry);
4775 /* Now loop through additional data fragments, and queue them. */
4776 if (skb_shinfo(skb)->nr_frags > 0) {
4777 unsigned int i, last;
4779 last = skb_shinfo(skb)->nr_frags - 1;
4780 for (i = 0; i <= last; i++) {
4781 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4784 mapping = sp->dma_maps[i + 1];
4785 tp->tx_buffers[entry].skb = NULL;
4787 tg3_set_txd(tp, entry, mapping, len,
4788 base_flags, (i == last) | (mss << 1));
4790 entry = NEXT_TX(entry);
4794 /* Packets are ready, update Tx producer idx local and on card. */
4795 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4797 tp->tx_prod = entry;
4798 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4799 netif_stop_queue(dev);
4800 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4801 netif_wake_queue(tp->dev);
4807 dev->trans_start = jiffies;
4809 return NETDEV_TX_OK;
4812 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4814 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4815 * TSO header is greater than 80 bytes.
4817 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4819 struct sk_buff *segs, *nskb;
4821 /* Estimate the number of fragments in the worst case */
4822 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4823 netif_stop_queue(tp->dev);
4824 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4825 return NETDEV_TX_BUSY;
4827 netif_wake_queue(tp->dev);
4830 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4832 goto tg3_tso_bug_end;
4838 tg3_start_xmit_dma_bug(nskb, tp->dev);
4844 return NETDEV_TX_OK;
4847 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4848 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4850 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4852 struct tg3 *tp = netdev_priv(dev);
4853 u32 len, entry, base_flags, mss;
4854 struct skb_shared_info *sp;
4855 int would_hit_hwbug;
4858 len = skb_headlen(skb);
4860 /* We are running in BH disabled context with netif_tx_lock
4861 * and TX reclaim runs via tp->napi.poll inside of a software
4862 * interrupt. Furthermore, IRQ processing runs lockless so we have
4863 * no IRQ context deadlocks to worry about either. Rejoice!
4865 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4866 if (!netif_queue_stopped(dev)) {
4867 netif_stop_queue(dev);
4869 /* This is a hard error, log it. */
4870 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4871 "queue awake!\n", dev->name);
4873 return NETDEV_TX_BUSY;
4876 entry = tp->tx_prod;
4878 if (skb->ip_summed == CHECKSUM_PARTIAL)
4879 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4881 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4883 int tcp_opt_len, ip_tcp_len, hdr_len;
4885 if (skb_header_cloned(skb) &&
4886 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4891 tcp_opt_len = tcp_optlen(skb);
4892 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4894 hdr_len = ip_tcp_len + tcp_opt_len;
4895 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4896 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4897 return (tg3_tso_bug(tp, skb));
4899 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4900 TXD_FLAG_CPU_POST_DMA);
4904 iph->tot_len = htons(mss + hdr_len);
4905 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4906 tcp_hdr(skb)->check = 0;
4907 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4909 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4914 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4915 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4916 if (tcp_opt_len || iph->ihl > 5) {
4919 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4920 mss |= (tsflags << 11);
4923 if (tcp_opt_len || iph->ihl > 5) {
4926 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4927 base_flags |= tsflags << 12;
4931 #if TG3_VLAN_TAG_USED
4932 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4933 base_flags |= (TXD_FLAG_VLAN |
4934 (vlan_tx_tag_get(skb) << 16));
4937 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4942 sp = skb_shinfo(skb);
4944 mapping = sp->dma_maps[0];
4946 tp->tx_buffers[entry].skb = skb;
4948 would_hit_hwbug = 0;
4950 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4951 would_hit_hwbug = 1;
4952 else if (tg3_4g_overflow_test(mapping, len))
4953 would_hit_hwbug = 1;
4955 tg3_set_txd(tp, entry, mapping, len, base_flags,
4956 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4958 entry = NEXT_TX(entry);
4960 /* Now loop through additional data fragments, and queue them. */
4961 if (skb_shinfo(skb)->nr_frags > 0) {
4962 unsigned int i, last;
4964 last = skb_shinfo(skb)->nr_frags - 1;
4965 for (i = 0; i <= last; i++) {
4966 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4969 mapping = sp->dma_maps[i + 1];
4971 tp->tx_buffers[entry].skb = NULL;
4973 if (tg3_4g_overflow_test(mapping, len))
4974 would_hit_hwbug = 1;
4976 if (tg3_40bit_overflow_test(tp, mapping, len))
4977 would_hit_hwbug = 1;
4979 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4980 tg3_set_txd(tp, entry, mapping, len,
4981 base_flags, (i == last)|(mss << 1));
4983 tg3_set_txd(tp, entry, mapping, len,
4984 base_flags, (i == last));
4986 entry = NEXT_TX(entry);
4990 if (would_hit_hwbug) {
4991 u32 last_plus_one = entry;
4994 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4995 start &= (TG3_TX_RING_SIZE - 1);
4997 /* If the workaround fails due to memory/mapping
4998 * failure, silently drop this packet.
5000 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5001 &start, base_flags, mss))
5007 /* Packets are ready, update Tx producer idx local and on card. */
5008 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5010 tp->tx_prod = entry;
5011 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5012 netif_stop_queue(dev);
5013 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5014 netif_wake_queue(tp->dev);
5020 dev->trans_start = jiffies;
5022 return NETDEV_TX_OK;
5025 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5030 if (new_mtu > ETH_DATA_LEN) {
5031 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5032 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5033 ethtool_op_set_tso(dev, 0);
5036 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5038 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5039 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5040 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5044 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5046 struct tg3 *tp = netdev_priv(dev);
5049 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5052 if (!netif_running(dev)) {
5053 /* We'll just catch it later when the
5056 tg3_set_mtu(dev, tp, new_mtu);
5064 tg3_full_lock(tp, 1);
5066 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5068 tg3_set_mtu(dev, tp, new_mtu);
5070 err = tg3_restart_hw(tp, 0);
5073 tg3_netif_start(tp);
5075 tg3_full_unlock(tp);
5083 /* Free up pending packets in all rx/tx rings.
5085 * The chip has been shut down and the driver detached from
5086 * the networking, so no interrupts or new tx packets will
5087 * end up in the driver. tp->{tx,}lock is not held and we are not
5088 * in an interrupt context and thus may sleep.
5090 static void tg3_free_rings(struct tg3 *tp)
5092 struct ring_info *rxp;
5095 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5096 rxp = &tp->rx_std_buffers[i];
5098 if (rxp->skb == NULL)
5100 pci_unmap_single(tp->pdev,
5101 pci_unmap_addr(rxp, mapping),
5102 tp->rx_pkt_buf_sz - tp->rx_offset,
5103 PCI_DMA_FROMDEVICE);
5104 dev_kfree_skb_any(rxp->skb);
5108 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5109 rxp = &tp->rx_jumbo_buffers[i];
5111 if (rxp->skb == NULL)
5113 pci_unmap_single(tp->pdev,
5114 pci_unmap_addr(rxp, mapping),
5115 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5116 PCI_DMA_FROMDEVICE);
5117 dev_kfree_skb_any(rxp->skb);
5121 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5122 struct tx_ring_info *txp;
5123 struct sk_buff *skb;
5125 txp = &tp->tx_buffers[i];
5133 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5137 i += skb_shinfo(skb)->nr_frags + 1;
5139 dev_kfree_skb_any(skb);
5143 /* Initialize tx/rx rings for packet processing.
5145 * The chip has been shut down and the driver detached from
5146 * the networking, so no interrupts or new tx packets will
5147 * end up in the driver. tp->{tx,}lock are held and thus
5150 static int tg3_init_rings(struct tg3 *tp)
5154 /* Free up all the SKBs. */
5157 /* Zero out all descriptors. */
5158 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5159 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5160 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5161 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5163 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5164 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5165 (tp->dev->mtu > ETH_DATA_LEN))
5166 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5168 /* Initialize invariants of the rings, we only set this
5169 * stuff once. This works because the card does not
5170 * write into the rx buffer posting rings.
5172 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5173 struct tg3_rx_buffer_desc *rxd;
5175 rxd = &tp->rx_std[i];
5176 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5178 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5179 rxd->opaque = (RXD_OPAQUE_RING_STD |
5180 (i << RXD_OPAQUE_INDEX_SHIFT));
5183 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5184 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5185 struct tg3_rx_buffer_desc *rxd;
5187 rxd = &tp->rx_jumbo[i];
5188 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5190 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5192 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5193 (i << RXD_OPAQUE_INDEX_SHIFT));
5197 /* Now allocate fresh SKBs for each rx ring. */
5198 for (i = 0; i < tp->rx_pending; i++) {
5199 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5200 printk(KERN_WARNING PFX
5201 "%s: Using a smaller RX standard ring, "
5202 "only %d out of %d buffers were allocated "
5204 tp->dev->name, i, tp->rx_pending);
5212 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5213 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5214 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5216 printk(KERN_WARNING PFX
5217 "%s: Using a smaller RX jumbo ring, "
5218 "only %d out of %d buffers were "
5219 "allocated successfully.\n",
5220 tp->dev->name, i, tp->rx_jumbo_pending);
5225 tp->rx_jumbo_pending = i;
5234 * Must not be invoked with interrupt sources disabled and
5235 * the hardware shutdown down.
5237 static void tg3_free_consistent(struct tg3 *tp)
5239 kfree(tp->rx_std_buffers);
5240 tp->rx_std_buffers = NULL;
5242 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5243 tp->rx_std, tp->rx_std_mapping);
5247 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5248 tp->rx_jumbo, tp->rx_jumbo_mapping);
5249 tp->rx_jumbo = NULL;
5252 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5253 tp->rx_rcb, tp->rx_rcb_mapping);
5257 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5258 tp->tx_ring, tp->tx_desc_mapping);
5261 if (tp->hw_status) {
5262 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5263 tp->hw_status, tp->status_mapping);
5264 tp->hw_status = NULL;
5267 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5268 tp->hw_stats, tp->stats_mapping);
5269 tp->hw_stats = NULL;
5274 * Must not be invoked with interrupt sources disabled and
5275 * the hardware shutdown down. Can sleep.
5277 static int tg3_alloc_consistent(struct tg3 *tp)
5279 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5281 TG3_RX_JUMBO_RING_SIZE)) +
5282 (sizeof(struct tx_ring_info) *
5285 if (!tp->rx_std_buffers)
5288 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5289 tp->tx_buffers = (struct tx_ring_info *)
5290 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5292 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5293 &tp->rx_std_mapping);
5297 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5298 &tp->rx_jumbo_mapping);
5303 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5304 &tp->rx_rcb_mapping);
5308 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5309 &tp->tx_desc_mapping);
5313 tp->hw_status = pci_alloc_consistent(tp->pdev,
5315 &tp->status_mapping);
5319 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5320 sizeof(struct tg3_hw_stats),
5321 &tp->stats_mapping);
5325 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5326 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5331 tg3_free_consistent(tp);
5335 #define MAX_WAIT_CNT 1000
5337 /* To stop a block, clear the enable bit and poll till it
5338 * clears. tp->lock is held.
5340 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5345 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5352 /* We can't enable/disable these bits of the
5353 * 5705/5750, just say success.
5366 for (i = 0; i < MAX_WAIT_CNT; i++) {
5369 if ((val & enable_bit) == 0)
5373 if (i == MAX_WAIT_CNT && !silent) {
5374 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5375 "ofs=%lx enable_bit=%x\n",
5383 /* tp->lock is held. */
5384 static int tg3_abort_hw(struct tg3 *tp, int silent)
5388 tg3_disable_ints(tp);
5390 tp->rx_mode &= ~RX_MODE_ENABLE;
5391 tw32_f(MAC_RX_MODE, tp->rx_mode);
5394 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5395 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5396 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5397 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5398 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5399 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5401 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5402 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5403 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5404 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5405 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5406 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5407 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5409 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5410 tw32_f(MAC_MODE, tp->mac_mode);
5413 tp->tx_mode &= ~TX_MODE_ENABLE;
5414 tw32_f(MAC_TX_MODE, tp->tx_mode);
5416 for (i = 0; i < MAX_WAIT_CNT; i++) {
5418 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5421 if (i >= MAX_WAIT_CNT) {
5422 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5423 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5424 tp->dev->name, tr32(MAC_TX_MODE));
5428 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5429 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5430 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5432 tw32(FTQ_RESET, 0xffffffff);
5433 tw32(FTQ_RESET, 0x00000000);
5435 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5436 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5439 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5441 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5446 /* tp->lock is held. */
5447 static int tg3_nvram_lock(struct tg3 *tp)
5449 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5452 if (tp->nvram_lock_cnt == 0) {
5453 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5454 for (i = 0; i < 8000; i++) {
5455 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5460 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5464 tp->nvram_lock_cnt++;
5469 /* tp->lock is held. */
5470 static void tg3_nvram_unlock(struct tg3 *tp)
5472 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5473 if (tp->nvram_lock_cnt > 0)
5474 tp->nvram_lock_cnt--;
5475 if (tp->nvram_lock_cnt == 0)
5476 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5480 /* tp->lock is held. */
5481 static void tg3_enable_nvram_access(struct tg3 *tp)
5483 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5484 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5485 u32 nvaccess = tr32(NVRAM_ACCESS);
5487 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5491 /* tp->lock is held. */
5492 static void tg3_disable_nvram_access(struct tg3 *tp)
5494 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5495 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5496 u32 nvaccess = tr32(NVRAM_ACCESS);
5498 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5502 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5507 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5508 if (apedata != APE_SEG_SIG_MAGIC)
5511 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5512 if (!(apedata & APE_FW_STATUS_READY))
5515 /* Wait for up to 1 millisecond for APE to service previous event. */
5516 for (i = 0; i < 10; i++) {
5517 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5520 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5522 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5523 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5524 event | APE_EVENT_STATUS_EVENT_PENDING);
5526 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5528 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5534 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5535 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5538 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5543 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5547 case RESET_KIND_INIT:
5548 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5549 APE_HOST_SEG_SIG_MAGIC);
5550 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5551 APE_HOST_SEG_LEN_MAGIC);
5552 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5553 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5554 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5555 APE_HOST_DRIVER_ID_MAGIC);
5556 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5557 APE_HOST_BEHAV_NO_PHYLOCK);
5559 event = APE_EVENT_STATUS_STATE_START;
5561 case RESET_KIND_SHUTDOWN:
5562 event = APE_EVENT_STATUS_STATE_UNLOAD;
5564 case RESET_KIND_SUSPEND:
5565 event = APE_EVENT_STATUS_STATE_SUSPEND;
5571 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5573 tg3_ape_send_event(tp, event);
5576 /* tp->lock is held. */
5577 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5579 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5580 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5582 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5584 case RESET_KIND_INIT:
5585 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5589 case RESET_KIND_SHUTDOWN:
5590 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5594 case RESET_KIND_SUSPEND:
5595 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5604 if (kind == RESET_KIND_INIT ||
5605 kind == RESET_KIND_SUSPEND)
5606 tg3_ape_driver_state_change(tp, kind);
5609 /* tp->lock is held. */
5610 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5612 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5614 case RESET_KIND_INIT:
5615 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5616 DRV_STATE_START_DONE);
5619 case RESET_KIND_SHUTDOWN:
5620 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5621 DRV_STATE_UNLOAD_DONE);
5629 if (kind == RESET_KIND_SHUTDOWN)
5630 tg3_ape_driver_state_change(tp, kind);
5633 /* tp->lock is held. */
5634 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5636 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5638 case RESET_KIND_INIT:
5639 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5643 case RESET_KIND_SHUTDOWN:
5644 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5648 case RESET_KIND_SUSPEND:
5649 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5659 static int tg3_poll_fw(struct tg3 *tp)
5664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5665 /* Wait up to 20ms for init done. */
5666 for (i = 0; i < 200; i++) {
5667 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5674 /* Wait for firmware initialization to complete. */
5675 for (i = 0; i < 100000; i++) {
5676 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5677 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5682 /* Chip might not be fitted with firmware. Some Sun onboard
5683 * parts are configured like that. So don't signal the timeout
5684 * of the above loop as an error, but do report the lack of
5685 * running firmware once.
5688 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5689 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5691 printk(KERN_INFO PFX "%s: No firmware running.\n",
5698 /* Save PCI command register before chip reset */
5699 static void tg3_save_pci_state(struct tg3 *tp)
5701 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5704 /* Restore PCI state after chip reset */
5705 static void tg3_restore_pci_state(struct tg3 *tp)
5709 /* Re-enable indirect register accesses. */
5710 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5711 tp->misc_host_ctrl);
5713 /* Set MAX PCI retry to zero. */
5714 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5715 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5716 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5717 val |= PCISTATE_RETRY_SAME_DMA;
5718 /* Allow reads and writes to the APE register and memory space. */
5719 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5720 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5721 PCISTATE_ALLOW_APE_SHMEM_WR;
5722 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5724 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5726 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5727 pcie_set_readrq(tp->pdev, 4096);
5729 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5730 tp->pci_cacheline_sz);
5731 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5735 /* Make sure PCI-X relaxed ordering bit is clear. */
5739 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5741 pcix_cmd &= ~PCI_X_CMD_ERO;
5742 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5746 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5748 /* Chip reset on 5780 will reset MSI enable bit,
5749 * so need to restore it.
5751 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5754 pci_read_config_word(tp->pdev,
5755 tp->msi_cap + PCI_MSI_FLAGS,
5757 pci_write_config_word(tp->pdev,
5758 tp->msi_cap + PCI_MSI_FLAGS,
5759 ctrl | PCI_MSI_FLAGS_ENABLE);
5760 val = tr32(MSGINT_MODE);
5761 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5766 static void tg3_stop_fw(struct tg3 *);
5768 /* tp->lock is held. */
5769 static int tg3_chip_reset(struct tg3 *tp)
5772 void (*write_op)(struct tg3 *, u32, u32);
5779 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5781 /* No matching tg3_nvram_unlock() after this because
5782 * chip reset below will undo the nvram lock.
5784 tp->nvram_lock_cnt = 0;
5786 /* GRC_MISC_CFG core clock reset will clear the memory
5787 * enable bit in PCI register 4 and the MSI enable bit
5788 * on some chips, so we save relevant registers here.
5790 tg3_save_pci_state(tp);
5792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5796 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5798 tw32(GRC_FASTBOOT_PC, 0);
5801 * We must avoid the readl() that normally takes place.
5802 * It locks machines, causes machine checks, and other
5803 * fun things. So, temporarily disable the 5701
5804 * hardware workaround, while we do the reset.
5806 write_op = tp->write32;
5807 if (write_op == tg3_write_flush_reg32)
5808 tp->write32 = tg3_write32;
5810 /* Prevent the irq handler from reading or writing PCI registers
5811 * during chip reset when the memory enable bit in the PCI command
5812 * register may be cleared. The chip does not generate interrupt
5813 * at this time, but the irq handler may still be called due to irq
5814 * sharing or irqpoll.
5816 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5817 if (tp->hw_status) {
5818 tp->hw_status->status = 0;
5819 tp->hw_status->status_tag = 0;
5823 synchronize_irq(tp->pdev->irq);
5826 val = GRC_MISC_CFG_CORECLK_RESET;
5828 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5829 if (tr32(0x7e2c) == 0x60) {
5832 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5833 tw32(GRC_MISC_CFG, (1 << 29));
5838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5839 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5840 tw32(GRC_VCPU_EXT_CTRL,
5841 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5844 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5845 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5846 tw32(GRC_MISC_CFG, val);
5848 /* restore 5701 hardware bug workaround write method */
5849 tp->write32 = write_op;
5851 /* Unfortunately, we have to delay before the PCI read back.
5852 * Some 575X chips even will not respond to a PCI cfg access
5853 * when the reset command is given to the chip.
5855 * How do these hardware designers expect things to work
5856 * properly if the PCI write is posted for a long period
5857 * of time? It is always necessary to have some method by
5858 * which a register read back can occur to push the write
5859 * out which does the reset.
5861 * For most tg3 variants the trick below was working.
5866 /* Flush PCI posted writes. The normal MMIO registers
5867 * are inaccessible at this time so this is the only
5868 * way to make this reliably (actually, this is no longer
5869 * the case, see above). I tried to use indirect
5870 * register read/write but this upset some 5701 variants.
5872 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5876 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5877 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5881 /* Wait for link training to complete. */
5882 for (i = 0; i < 5000; i++)
5885 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5886 pci_write_config_dword(tp->pdev, 0xc4,
5887 cfg_val | (1 << 15));
5889 /* Set PCIE max payload size and clear error status. */
5890 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5893 tg3_restore_pci_state(tp);
5895 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5898 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5899 val = tr32(MEMARB_MODE);
5900 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5902 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5904 tw32(0x5000, 0x400);
5907 tw32(GRC_MODE, tp->grc_mode);
5909 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5912 tw32(0xc4, val | (1 << 15));
5915 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5917 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5918 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5919 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5920 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5923 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5924 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5925 tw32_f(MAC_MODE, tp->mac_mode);
5926 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5927 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5928 tw32_f(MAC_MODE, tp->mac_mode);
5929 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
5930 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
5931 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
5932 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
5933 tw32_f(MAC_MODE, tp->mac_mode);
5935 tw32_f(MAC_MODE, 0);
5940 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
5942 err = tg3_poll_fw(tp);
5946 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5947 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5950 tw32(0x7c00, val | (1 << 25));
5953 /* Reprobe ASF enable state. */
5954 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5955 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5956 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5957 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5960 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5961 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5962 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5963 tp->last_event_jiffies = jiffies;
5964 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5965 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5972 /* tp->lock is held. */
5973 static void tg3_stop_fw(struct tg3 *tp)
5975 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5976 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5977 /* Wait for RX cpu to ACK the previous event. */
5978 tg3_wait_for_event_ack(tp);
5980 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5982 tg3_generate_fw_event(tp);
5984 /* Wait for RX cpu to ACK this event. */
5985 tg3_wait_for_event_ack(tp);
5989 /* tp->lock is held. */
5990 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5996 tg3_write_sig_pre_reset(tp, kind);
5998 tg3_abort_hw(tp, silent);
5999 err = tg3_chip_reset(tp);
6001 tg3_write_sig_legacy(tp, kind);
6002 tg3_write_sig_post_reset(tp, kind);
6010 #define TG3_FW_RELEASE_MAJOR 0x0
6011 #define TG3_FW_RELASE_MINOR 0x0
6012 #define TG3_FW_RELEASE_FIX 0x0
6013 #define TG3_FW_START_ADDR 0x08000000
6014 #define TG3_FW_TEXT_ADDR 0x08000000
6015 #define TG3_FW_TEXT_LEN 0x9c0
6016 #define TG3_FW_RODATA_ADDR 0x080009c0
6017 #define TG3_FW_RODATA_LEN 0x60
6018 #define TG3_FW_DATA_ADDR 0x08000a40
6019 #define TG3_FW_DATA_LEN 0x20
6020 #define TG3_FW_SBSS_ADDR 0x08000a60
6021 #define TG3_FW_SBSS_LEN 0xc
6022 #define TG3_FW_BSS_ADDR 0x08000a70
6023 #define TG3_FW_BSS_LEN 0x10
6025 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6026 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6027 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6028 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6029 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6030 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6031 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6032 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6033 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6034 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6035 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6036 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6037 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6038 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6039 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6040 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6041 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6042 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6043 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6044 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6045 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6046 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6047 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6048 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6049 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6050 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6052 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6053 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6054 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6055 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6056 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6057 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6058 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6059 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6060 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6061 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6062 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6063 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6064 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6065 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6066 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6067 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6068 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6069 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6070 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6071 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6072 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6073 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6074 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6075 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6076 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6077 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6078 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6079 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6080 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6081 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6082 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6083 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6084 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6085 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6086 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6087 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6088 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6089 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6090 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6091 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6092 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6093 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6094 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6095 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6096 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6097 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6098 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6099 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6100 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6101 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6102 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6103 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6104 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6105 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6106 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6107 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6108 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6109 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6110 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6111 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6112 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6113 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6114 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6115 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6116 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6119 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6120 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6121 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6122 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6123 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6127 #if 0 /* All zeros, don't eat up space with it. */
6128 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6129 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6130 0x00000000, 0x00000000, 0x00000000, 0x00000000
6134 #define RX_CPU_SCRATCH_BASE 0x30000
6135 #define RX_CPU_SCRATCH_SIZE 0x04000
6136 #define TX_CPU_SCRATCH_BASE 0x34000
6137 #define TX_CPU_SCRATCH_SIZE 0x04000
6139 /* tp->lock is held. */
6140 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6144 BUG_ON(offset == TX_CPU_BASE &&
6145 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6148 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6150 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6153 if (offset == RX_CPU_BASE) {
6154 for (i = 0; i < 10000; i++) {
6155 tw32(offset + CPU_STATE, 0xffffffff);
6156 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6157 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6161 tw32(offset + CPU_STATE, 0xffffffff);
6162 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6165 for (i = 0; i < 10000; i++) {
6166 tw32(offset + CPU_STATE, 0xffffffff);
6167 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6168 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6174 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6177 (offset == RX_CPU_BASE ? "RX" : "TX"));
6181 /* Clear firmware's nvram arbitration. */
6182 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6183 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6188 unsigned int text_base;
6189 unsigned int text_len;
6190 const u32 *text_data;
6191 unsigned int rodata_base;
6192 unsigned int rodata_len;
6193 const u32 *rodata_data;
6194 unsigned int data_base;
6195 unsigned int data_len;
6196 const u32 *data_data;
6199 /* tp->lock is held. */
6200 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6201 int cpu_scratch_size, struct fw_info *info)
6203 int err, lock_err, i;
6204 void (*write_op)(struct tg3 *, u32, u32);
6206 if (cpu_base == TX_CPU_BASE &&
6207 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6208 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6209 "TX cpu firmware on %s which is 5705.\n",
6214 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6215 write_op = tg3_write_mem;
6217 write_op = tg3_write_indirect_reg32;
6219 /* It is possible that bootcode is still loading at this point.
6220 * Get the nvram lock first before halting the cpu.
6222 lock_err = tg3_nvram_lock(tp);
6223 err = tg3_halt_cpu(tp, cpu_base);
6225 tg3_nvram_unlock(tp);
6229 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6230 write_op(tp, cpu_scratch_base + i, 0);
6231 tw32(cpu_base + CPU_STATE, 0xffffffff);
6232 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6233 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6234 write_op(tp, (cpu_scratch_base +
6235 (info->text_base & 0xffff) +
6238 info->text_data[i] : 0));
6239 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6240 write_op(tp, (cpu_scratch_base +
6241 (info->rodata_base & 0xffff) +
6243 (info->rodata_data ?
6244 info->rodata_data[i] : 0));
6245 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6246 write_op(tp, (cpu_scratch_base +
6247 (info->data_base & 0xffff) +
6250 info->data_data[i] : 0));
6258 /* tp->lock is held. */
6259 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6261 struct fw_info info;
6264 info.text_base = TG3_FW_TEXT_ADDR;
6265 info.text_len = TG3_FW_TEXT_LEN;
6266 info.text_data = &tg3FwText[0];
6267 info.rodata_base = TG3_FW_RODATA_ADDR;
6268 info.rodata_len = TG3_FW_RODATA_LEN;
6269 info.rodata_data = &tg3FwRodata[0];
6270 info.data_base = TG3_FW_DATA_ADDR;
6271 info.data_len = TG3_FW_DATA_LEN;
6272 info.data_data = NULL;
6274 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6275 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6280 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6281 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6286 /* Now startup only the RX cpu. */
6287 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6288 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6290 for (i = 0; i < 5; i++) {
6291 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6293 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6294 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6295 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6299 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6300 "to set RX CPU PC, is %08x should be %08x\n",
6301 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6305 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6306 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6312 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
6313 #define TG3_TSO_FW_RELASE_MINOR 0x6
6314 #define TG3_TSO_FW_RELEASE_FIX 0x0
6315 #define TG3_TSO_FW_START_ADDR 0x08000000
6316 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
6317 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
6318 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6319 #define TG3_TSO_FW_RODATA_LEN 0x60
6320 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
6321 #define TG3_TSO_FW_DATA_LEN 0x30
6322 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6323 #define TG3_TSO_FW_SBSS_LEN 0x2c
6324 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
6325 #define TG3_TSO_FW_BSS_LEN 0x894
6327 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6328 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6329 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6330 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6331 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6332 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6333 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6334 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6335 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6336 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6337 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6338 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6339 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6340 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6341 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6342 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6343 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6344 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6345 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6346 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6347 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6348 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6349 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6350 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6351 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6352 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6353 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6354 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6355 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6356 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6357 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6358 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6359 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6360 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6361 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6362 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6363 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6364 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6365 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6366 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6367 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6368 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6369 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6370 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6371 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6372 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6373 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6374 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6375 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6376 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6377 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6378 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6379 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6380 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6381 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6382 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6383 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6384 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6385 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6386 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6387 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6388 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6389 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6390 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6391 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6392 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6393 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6394 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6395 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6396 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6397 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6398 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6399 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6400 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6401 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6402 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6403 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6404 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6405 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6406 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6407 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6408 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6409 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6410 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6411 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6412 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6413 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6414 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6415 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6416 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6417 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6418 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6419 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6420 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6421 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6422 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6423 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6424 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6425 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6426 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6427 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6428 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6429 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6430 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6431 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6432 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6433 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6434 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6435 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6436 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6437 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6438 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6439 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6440 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6441 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6442 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6443 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6444 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6445 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6446 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6447 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6448 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6449 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6450 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6451 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6452 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6453 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6454 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6455 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6456 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6457 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6458 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6459 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6460 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6461 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6462 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6463 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6464 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6465 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6466 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6467 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6468 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6469 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6470 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6471 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6472 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6473 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6474 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6475 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6476 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6477 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6478 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6479 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6480 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6481 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6482 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6483 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6484 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6485 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6486 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6487 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6488 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6489 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6490 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6491 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6492 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6493 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6494 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6495 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6496 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6497 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6498 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6499 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6500 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6501 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6502 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6503 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6504 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6505 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6506 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6507 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6508 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6509 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6510 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6511 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6512 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6513 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6514 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6515 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6516 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6517 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6518 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6519 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6520 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6521 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6522 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6523 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6524 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6525 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6526 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6527 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6528 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6529 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6530 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6531 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6532 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6533 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6534 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6535 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6536 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6537 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6538 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6539 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6540 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6541 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6542 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6543 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6544 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6545 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6546 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6547 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6548 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6549 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6550 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6551 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6552 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6553 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6554 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6555 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6556 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6557 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6558 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6559 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6560 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6561 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6562 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6563 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6564 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6565 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6566 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6567 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6568 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6569 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6570 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6571 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6572 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6573 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6574 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6575 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6576 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6577 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6578 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6579 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6580 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6581 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6582 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6583 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6584 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6585 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6586 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6587 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6588 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6589 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6590 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6591 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6592 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6593 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6594 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6595 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6596 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6597 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6598 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6599 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6600 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6601 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6602 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6603 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6604 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6605 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6606 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6607 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6608 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6609 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6610 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6611 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6614 static const u32 tg3TsoFwRodata[] = {
6615 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6616 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6617 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6618 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6622 static const u32 tg3TsoFwData[] = {
6623 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6624 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6628 /* 5705 needs a special version of the TSO firmware. */
6629 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6630 #define TG3_TSO5_FW_RELASE_MINOR 0x2
6631 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6632 #define TG3_TSO5_FW_START_ADDR 0x00010000
6633 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6634 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6635 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6636 #define TG3_TSO5_FW_RODATA_LEN 0x50
6637 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6638 #define TG3_TSO5_FW_DATA_LEN 0x20
6639 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6640 #define TG3_TSO5_FW_SBSS_LEN 0x28
6641 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6642 #define TG3_TSO5_FW_BSS_LEN 0x88
6644 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6645 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6646 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6647 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6648 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6649 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6650 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6651 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6652 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6653 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6654 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6655 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6656 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6657 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6658 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6659 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6660 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6661 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6662 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6663 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6664 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6665 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6666 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6667 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6668 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6669 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6670 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6671 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6672 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6673 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6674 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6675 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6676 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6677 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6678 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6679 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6680 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6681 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6682 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6683 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6684 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6685 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6686 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6687 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6688 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6689 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6690 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6691 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6692 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6693 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6694 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6695 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6696 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6697 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6698 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6699 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6700 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6701 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6702 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6703 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6704 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6705 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6706 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6707 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6708 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6709 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6710 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6711 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6712 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6713 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6714 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6715 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6716 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6717 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6718 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6719 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6720 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6721 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6722 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6723 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6724 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6725 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6726 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6727 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6728 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6729 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6730 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6731 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6732 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6733 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6734 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6735 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6736 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6737 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6738 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6739 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6740 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6741 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6742 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6743 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6744 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6745 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6746 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6747 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6748 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6749 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6750 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6751 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6752 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6753 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6754 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6755 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6756 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6757 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6758 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6759 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6760 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6761 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6762 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6763 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6764 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6765 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6766 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6767 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6768 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6769 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6770 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6771 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6772 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6773 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6774 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6775 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6776 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6777 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6778 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6779 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6780 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6781 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6782 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6783 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6784 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6785 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6786 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6787 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6788 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6789 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6790 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6791 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6792 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6793 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6794 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6795 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6796 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6797 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6798 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6799 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6800 0x00000000, 0x00000000, 0x00000000,
6803 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6804 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6805 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6806 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6807 0x00000000, 0x00000000, 0x00000000,
6810 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6811 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6812 0x00000000, 0x00000000, 0x00000000,
6815 /* tp->lock is held. */
6816 static int tg3_load_tso_firmware(struct tg3 *tp)
6818 struct fw_info info;
6819 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6822 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6826 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6827 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6828 info.text_data = &tg3Tso5FwText[0];
6829 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6830 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6831 info.rodata_data = &tg3Tso5FwRodata[0];
6832 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6833 info.data_len = TG3_TSO5_FW_DATA_LEN;
6834 info.data_data = &tg3Tso5FwData[0];
6835 cpu_base = RX_CPU_BASE;
6836 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6837 cpu_scratch_size = (info.text_len +
6840 TG3_TSO5_FW_SBSS_LEN +
6841 TG3_TSO5_FW_BSS_LEN);
6843 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6844 info.text_len = TG3_TSO_FW_TEXT_LEN;
6845 info.text_data = &tg3TsoFwText[0];
6846 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6847 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6848 info.rodata_data = &tg3TsoFwRodata[0];
6849 info.data_base = TG3_TSO_FW_DATA_ADDR;
6850 info.data_len = TG3_TSO_FW_DATA_LEN;
6851 info.data_data = &tg3TsoFwData[0];
6852 cpu_base = TX_CPU_BASE;
6853 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6854 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6857 err = tg3_load_firmware_cpu(tp, cpu_base,
6858 cpu_scratch_base, cpu_scratch_size,
6863 /* Now startup the cpu. */
6864 tw32(cpu_base + CPU_STATE, 0xffffffff);
6865 tw32_f(cpu_base + CPU_PC, info.text_base);
6867 for (i = 0; i < 5; i++) {
6868 if (tr32(cpu_base + CPU_PC) == info.text_base)
6870 tw32(cpu_base + CPU_STATE, 0xffffffff);
6871 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6872 tw32_f(cpu_base + CPU_PC, info.text_base);
6876 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6877 "to set CPU PC, is %08x should be %08x\n",
6878 tp->dev->name, tr32(cpu_base + CPU_PC),
6882 tw32(cpu_base + CPU_STATE, 0xffffffff);
6883 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6888 /* tp->lock is held. */
6889 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6891 u32 addr_high, addr_low;
6894 addr_high = ((tp->dev->dev_addr[0] << 8) |
6895 tp->dev->dev_addr[1]);
6896 addr_low = ((tp->dev->dev_addr[2] << 24) |
6897 (tp->dev->dev_addr[3] << 16) |
6898 (tp->dev->dev_addr[4] << 8) |
6899 (tp->dev->dev_addr[5] << 0));
6900 for (i = 0; i < 4; i++) {
6901 if (i == 1 && skip_mac_1)
6903 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6904 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6909 for (i = 0; i < 12; i++) {
6910 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6911 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6915 addr_high = (tp->dev->dev_addr[0] +
6916 tp->dev->dev_addr[1] +
6917 tp->dev->dev_addr[2] +
6918 tp->dev->dev_addr[3] +
6919 tp->dev->dev_addr[4] +
6920 tp->dev->dev_addr[5]) &
6921 TX_BACKOFF_SEED_MASK;
6922 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6925 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6927 struct tg3 *tp = netdev_priv(dev);
6928 struct sockaddr *addr = p;
6929 int err = 0, skip_mac_1 = 0;
6931 if (!is_valid_ether_addr(addr->sa_data))
6934 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6936 if (!netif_running(dev))
6939 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6940 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6942 addr0_high = tr32(MAC_ADDR_0_HIGH);
6943 addr0_low = tr32(MAC_ADDR_0_LOW);
6944 addr1_high = tr32(MAC_ADDR_1_HIGH);
6945 addr1_low = tr32(MAC_ADDR_1_LOW);
6947 /* Skip MAC addr 1 if ASF is using it. */
6948 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6949 !(addr1_high == 0 && addr1_low == 0))
6952 spin_lock_bh(&tp->lock);
6953 __tg3_set_mac_addr(tp, skip_mac_1);
6954 spin_unlock_bh(&tp->lock);
6959 /* tp->lock is held. */
6960 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6961 dma_addr_t mapping, u32 maxlen_flags,
6965 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6966 ((u64) mapping >> 32));
6968 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6969 ((u64) mapping & 0xffffffff));
6971 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6974 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6976 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6980 static void __tg3_set_rx_mode(struct net_device *);
6981 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6983 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6984 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6985 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6986 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6987 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6988 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6989 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6991 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6992 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6993 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6994 u32 val = ec->stats_block_coalesce_usecs;
6996 if (!netif_carrier_ok(tp->dev))
6999 tw32(HOSTCC_STAT_COAL_TICKS, val);
7003 /* tp->lock is held. */
7004 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7006 u32 val, rdmac_mode;
7009 tg3_disable_ints(tp);
7013 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7015 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7016 tg3_abort_hw(tp, 1);
7020 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7023 err = tg3_chip_reset(tp);
7027 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7029 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7030 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7031 val = tr32(TG3_CPMU_CTRL);
7032 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7033 tw32(TG3_CPMU_CTRL, val);
7035 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7036 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7037 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7038 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7040 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7041 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7042 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7043 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7045 val = tr32(TG3_CPMU_HST_ACC);
7046 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7047 val |= CPMU_HST_ACC_MACCLK_6_25;
7048 tw32(TG3_CPMU_HST_ACC, val);
7051 /* This works around an issue with Athlon chipsets on
7052 * B3 tigon3 silicon. This bit has no effect on any
7053 * other revision. But do not set this on PCI Express
7054 * chips and don't even touch the clocks if the CPMU is present.
7056 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7057 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7058 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7059 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7062 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7063 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7064 val = tr32(TG3PCI_PCISTATE);
7065 val |= PCISTATE_RETRY_SAME_DMA;
7066 tw32(TG3PCI_PCISTATE, val);
7069 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7070 /* Allow reads and writes to the
7071 * APE register and memory space.
7073 val = tr32(TG3PCI_PCISTATE);
7074 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7075 PCISTATE_ALLOW_APE_SHMEM_WR;
7076 tw32(TG3PCI_PCISTATE, val);
7079 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7080 /* Enable some hw fixes. */
7081 val = tr32(TG3PCI_MSI_DATA);
7082 val |= (1 << 26) | (1 << 28) | (1 << 29);
7083 tw32(TG3PCI_MSI_DATA, val);
7086 /* Descriptor ring init may make accesses to the
7087 * NIC SRAM area to setup the TX descriptors, so we
7088 * can only do this after the hardware has been
7089 * successfully reset.
7091 err = tg3_init_rings(tp);
7095 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7096 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7097 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7098 /* This value is determined during the probe time DMA
7099 * engine test, tg3_test_dma.
7101 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7104 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7105 GRC_MODE_4X_NIC_SEND_RINGS |
7106 GRC_MODE_NO_TX_PHDR_CSUM |
7107 GRC_MODE_NO_RX_PHDR_CSUM);
7108 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7110 /* Pseudo-header checksum is done by hardware logic and not
7111 * the offload processers, so make the chip do the pseudo-
7112 * header checksums on receive. For transmit it is more
7113 * convenient to do the pseudo-header checksum in software
7114 * as Linux does that on transmit for us in all cases.
7116 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7120 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7122 /* Setup the timer prescalar register. Clock is always 66Mhz. */
7123 val = tr32(GRC_MISC_CFG);
7125 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7126 tw32(GRC_MISC_CFG, val);
7128 /* Initialize MBUF/DESC pool. */
7129 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7131 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7132 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7134 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7136 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7137 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7138 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7140 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7143 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7144 TG3_TSO5_FW_RODATA_LEN +
7145 TG3_TSO5_FW_DATA_LEN +
7146 TG3_TSO5_FW_SBSS_LEN +
7147 TG3_TSO5_FW_BSS_LEN);
7148 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7149 tw32(BUFMGR_MB_POOL_ADDR,
7150 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7151 tw32(BUFMGR_MB_POOL_SIZE,
7152 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7155 if (tp->dev->mtu <= ETH_DATA_LEN) {
7156 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7157 tp->bufmgr_config.mbuf_read_dma_low_water);
7158 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7159 tp->bufmgr_config.mbuf_mac_rx_low_water);
7160 tw32(BUFMGR_MB_HIGH_WATER,
7161 tp->bufmgr_config.mbuf_high_water);
7163 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7164 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7165 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7166 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7167 tw32(BUFMGR_MB_HIGH_WATER,
7168 tp->bufmgr_config.mbuf_high_water_jumbo);
7170 tw32(BUFMGR_DMA_LOW_WATER,
7171 tp->bufmgr_config.dma_low_water);
7172 tw32(BUFMGR_DMA_HIGH_WATER,
7173 tp->bufmgr_config.dma_high_water);
7175 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7176 for (i = 0; i < 2000; i++) {
7177 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7182 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7187 /* Setup replenish threshold. */
7188 val = tp->rx_pending / 8;
7191 else if (val > tp->rx_std_max_post)
7192 val = tp->rx_std_max_post;
7193 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7194 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7195 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7197 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7198 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7201 tw32(RCVBDI_STD_THRESH, val);
7203 /* Initialize TG3_BDINFO's at:
7204 * RCVDBDI_STD_BD: standard eth size rx ring
7205 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7206 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7209 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7210 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7211 * ring attribute flags
7212 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7214 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7215 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7217 * The size of each ring is fixed in the firmware, but the location is
7220 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7221 ((u64) tp->rx_std_mapping >> 32));
7222 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7223 ((u64) tp->rx_std_mapping & 0xffffffff));
7224 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7225 NIC_SRAM_RX_BUFFER_DESC);
7227 /* Don't even try to program the JUMBO/MINI buffer descriptor
7230 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7231 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7232 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7234 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7235 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7237 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7238 BDINFO_FLAGS_DISABLED);
7240 /* Setup replenish threshold. */
7241 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7243 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7244 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7245 ((u64) tp->rx_jumbo_mapping >> 32));
7246 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7247 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7248 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7249 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7250 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7251 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7253 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7254 BDINFO_FLAGS_DISABLED);
7259 /* There is only one send ring on 5705/5750, no need to explicitly
7260 * disable the others.
7262 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7263 /* Clear out send RCB ring in SRAM. */
7264 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7265 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7266 BDINFO_FLAGS_DISABLED);
7271 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7272 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7274 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7275 tp->tx_desc_mapping,
7276 (TG3_TX_RING_SIZE <<
7277 BDINFO_FLAGS_MAXLEN_SHIFT),
7278 NIC_SRAM_TX_BUFFER_DESC);
7280 /* There is only one receive return ring on 5705/5750, no need
7281 * to explicitly disable the others.
7283 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7284 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7285 i += TG3_BDINFO_SIZE) {
7286 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7287 BDINFO_FLAGS_DISABLED);
7292 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7294 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7296 (TG3_RX_RCB_RING_SIZE(tp) <<
7297 BDINFO_FLAGS_MAXLEN_SHIFT),
7300 tp->rx_std_ptr = tp->rx_pending;
7301 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7304 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7305 tp->rx_jumbo_pending : 0;
7306 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7309 /* Initialize MAC address and backoff seed. */
7310 __tg3_set_mac_addr(tp, 0);
7312 /* MTU + ethernet header + FCS + optional VLAN tag */
7313 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7315 /* The slot time is changed by tg3_setup_phy if we
7316 * run at gigabit with half duplex.
7318 tw32(MAC_TX_LENGTHS,
7319 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7320 (6 << TX_LENGTHS_IPG_SHIFT) |
7321 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7323 /* Receive rules. */
7324 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7325 tw32(RCVLPC_CONFIG, 0x0181);
7327 /* Calculate RDMAC_MODE setting early, we need it to determine
7328 * the RCVLPC_STATE_ENABLE mask.
7330 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7331 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7332 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7333 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7334 RDMAC_MODE_LNGREAD_ENAB);
7336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7338 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7339 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7340 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7342 /* If statement applies to 5705 and 5750 PCI devices only */
7343 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7344 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7345 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7346 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7348 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7349 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7350 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7351 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7355 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7356 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7358 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7359 rdmac_mode |= (1 << 27);
7361 /* Receive/send statistics. */
7362 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7363 val = tr32(RCVLPC_STATS_ENABLE);
7364 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7365 tw32(RCVLPC_STATS_ENABLE, val);
7366 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7367 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7368 val = tr32(RCVLPC_STATS_ENABLE);
7369 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7370 tw32(RCVLPC_STATS_ENABLE, val);
7372 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7374 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7375 tw32(SNDDATAI_STATSENAB, 0xffffff);
7376 tw32(SNDDATAI_STATSCTRL,
7377 (SNDDATAI_SCTRL_ENABLE |
7378 SNDDATAI_SCTRL_FASTUPD));
7380 /* Setup host coalescing engine. */
7381 tw32(HOSTCC_MODE, 0);
7382 for (i = 0; i < 2000; i++) {
7383 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7388 __tg3_set_coalesce(tp, &tp->coal);
7390 /* set status block DMA address */
7391 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7392 ((u64) tp->status_mapping >> 32));
7393 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7394 ((u64) tp->status_mapping & 0xffffffff));
7396 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7397 /* Status/statistics block address. See tg3_timer,
7398 * the tg3_periodic_fetch_stats call there, and
7399 * tg3_get_stats to see how this works for 5705/5750 chips.
7401 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7402 ((u64) tp->stats_mapping >> 32));
7403 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7404 ((u64) tp->stats_mapping & 0xffffffff));
7405 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7406 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7409 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7411 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7412 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7413 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7414 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7416 /* Clear statistics/status block in chip, and status block in ram. */
7417 for (i = NIC_SRAM_STATS_BLK;
7418 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7420 tg3_write_mem(tp, i, 0);
7423 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7425 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7426 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7427 /* reset to prevent losing 1st rx packet intermittently */
7428 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7432 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7433 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7436 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7437 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7438 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7439 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7440 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7441 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7442 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7445 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7446 * If TG3_FLG2_IS_NIC is zero, we should read the
7447 * register to preserve the GPIO settings for LOMs. The GPIOs,
7448 * whether used as inputs or outputs, are set by boot code after
7451 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7454 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7455 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7456 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7459 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7460 GRC_LCLCTRL_GPIO_OUTPUT3;
7462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7463 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7465 tp->grc_local_ctrl &= ~gpio_mask;
7466 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7468 /* GPIO1 must be driven high for eeprom write protect */
7469 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7470 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7471 GRC_LCLCTRL_GPIO_OUTPUT1);
7473 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7476 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7479 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7480 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7484 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7485 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7486 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7487 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7488 WDMAC_MODE_LNGREAD_ENAB);
7490 /* If statement applies to 5705 and 5750 PCI devices only */
7491 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7492 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7494 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7495 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7496 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7498 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7499 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7500 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7501 val |= WDMAC_MODE_RX_ACCEL;
7505 /* Enable host coalescing bug fix */
7506 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7507 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7508 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7509 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7510 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7511 val |= WDMAC_MODE_STATUS_TAG_FIX;
7513 tw32_f(WDMAC_MODE, val);
7516 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7519 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7521 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7522 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7523 pcix_cmd |= PCI_X_CMD_READ_2K;
7524 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7525 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7526 pcix_cmd |= PCI_X_CMD_READ_2K;
7528 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7532 tw32_f(RDMAC_MODE, rdmac_mode);
7535 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7536 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7537 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7539 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7541 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7543 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7545 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7546 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7547 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7548 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7549 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7550 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7551 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7552 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7554 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7555 err = tg3_load_5701_a0_firmware_fix(tp);
7560 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7561 err = tg3_load_tso_firmware(tp);
7566 tp->tx_mode = TX_MODE_ENABLE;
7567 tw32_f(MAC_TX_MODE, tp->tx_mode);
7570 tp->rx_mode = RX_MODE_ENABLE;
7571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7573 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7575 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7577 tw32_f(MAC_RX_MODE, tp->rx_mode);
7580 tw32(MAC_LED_CTRL, tp->led_ctrl);
7582 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7583 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7584 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7587 tw32_f(MAC_RX_MODE, tp->rx_mode);
7590 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7591 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7592 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7593 /* Set drive transmission level to 1.2V */
7594 /* only if the signal pre-emphasis bit is not set */
7595 val = tr32(MAC_SERDES_CFG);
7598 tw32(MAC_SERDES_CFG, val);
7600 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7601 tw32(MAC_SERDES_CFG, 0x616000);
7604 /* Prevent chip from dropping frames when flow control
7607 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7610 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7611 /* Use hardware link auto-negotiation */
7612 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7615 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7616 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7619 tmp = tr32(SERDES_RX_CTRL);
7620 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7621 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7622 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7623 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7626 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7627 if (tp->link_config.phy_is_low_power) {
7628 tp->link_config.phy_is_low_power = 0;
7629 tp->link_config.speed = tp->link_config.orig_speed;
7630 tp->link_config.duplex = tp->link_config.orig_duplex;
7631 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7634 err = tg3_setup_phy(tp, 0);
7638 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7639 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7642 /* Clear CRC stats. */
7643 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7644 tg3_writephy(tp, MII_TG3_TEST1,
7645 tmp | MII_TG3_TEST1_CRC_EN);
7646 tg3_readphy(tp, 0x14, &tmp);
7651 __tg3_set_rx_mode(tp->dev);
7653 /* Initialize receive rules. */
7654 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7655 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7656 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7657 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7659 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7660 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7664 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7668 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7670 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7672 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7674 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7676 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7678 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7680 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7682 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7684 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7686 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7688 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7690 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7692 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7694 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7702 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7703 /* Write our heartbeat update interval to APE. */
7704 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7705 APE_HOST_HEARTBEAT_INT_DISABLE);
7707 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7712 /* Called at device open time to get the chip ready for
7713 * packet processing. Invoked with tp->lock held.
7715 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7717 tg3_switch_clocks(tp);
7719 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7721 return tg3_reset_hw(tp, reset_phy);
7724 #define TG3_STAT_ADD32(PSTAT, REG) \
7725 do { u32 __val = tr32(REG); \
7726 (PSTAT)->low += __val; \
7727 if ((PSTAT)->low < __val) \
7728 (PSTAT)->high += 1; \
7731 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7733 struct tg3_hw_stats *sp = tp->hw_stats;
7735 if (!netif_carrier_ok(tp->dev))
7738 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7739 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7740 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7741 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7742 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7743 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7744 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7745 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7746 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7747 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7748 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7749 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7750 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7752 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7753 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7754 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7755 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7756 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7757 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7758 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7759 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7760 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7761 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7762 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7763 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7764 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7765 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7767 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7768 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7769 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7772 static void tg3_timer(unsigned long __opaque)
7774 struct tg3 *tp = (struct tg3 *) __opaque;
7779 spin_lock(&tp->lock);
7781 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7782 /* All of this garbage is because when using non-tagged
7783 * IRQ status the mailbox/status_block protocol the chip
7784 * uses with the cpu is race prone.
7786 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7787 tw32(GRC_LOCAL_CTRL,
7788 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7790 tw32(HOSTCC_MODE, tp->coalesce_mode |
7791 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7794 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7795 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7796 spin_unlock(&tp->lock);
7797 schedule_work(&tp->reset_task);
7802 /* This part only runs once per second. */
7803 if (!--tp->timer_counter) {
7804 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7805 tg3_periodic_fetch_stats(tp);
7807 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7811 mac_stat = tr32(MAC_STATUS);
7814 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7815 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7817 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7821 tg3_setup_phy(tp, 0);
7822 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7823 u32 mac_stat = tr32(MAC_STATUS);
7826 if (netif_carrier_ok(tp->dev) &&
7827 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7830 if (! netif_carrier_ok(tp->dev) &&
7831 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7832 MAC_STATUS_SIGNAL_DET))) {
7836 if (!tp->serdes_counter) {
7839 ~MAC_MODE_PORT_MODE_MASK));
7841 tw32_f(MAC_MODE, tp->mac_mode);
7844 tg3_setup_phy(tp, 0);
7846 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7847 tg3_serdes_parallel_detect(tp);
7849 tp->timer_counter = tp->timer_multiplier;
7852 /* Heartbeat is only sent once every 2 seconds.
7854 * The heartbeat is to tell the ASF firmware that the host
7855 * driver is still alive. In the event that the OS crashes,
7856 * ASF needs to reset the hardware to free up the FIFO space
7857 * that may be filled with rx packets destined for the host.
7858 * If the FIFO is full, ASF will no longer function properly.
7860 * Unintended resets have been reported on real time kernels
7861 * where the timer doesn't run on time. Netpoll will also have
7864 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7865 * to check the ring condition when the heartbeat is expiring
7866 * before doing the reset. This will prevent most unintended
7869 if (!--tp->asf_counter) {
7870 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7871 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7872 tg3_wait_for_event_ack(tp);
7874 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7875 FWCMD_NICDRV_ALIVE3);
7876 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7877 /* 5 seconds timeout */
7878 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7880 tg3_generate_fw_event(tp);
7882 tp->asf_counter = tp->asf_multiplier;
7885 spin_unlock(&tp->lock);
7888 tp->timer.expires = jiffies + tp->timer_offset;
7889 add_timer(&tp->timer);
7892 static int tg3_request_irq(struct tg3 *tp)
7895 unsigned long flags;
7896 struct net_device *dev = tp->dev;
7898 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7900 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7902 flags = IRQF_SAMPLE_RANDOM;
7905 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7906 fn = tg3_interrupt_tagged;
7907 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7909 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7912 static int tg3_test_interrupt(struct tg3 *tp)
7914 struct net_device *dev = tp->dev;
7915 int err, i, intr_ok = 0;
7917 if (!netif_running(dev))
7920 tg3_disable_ints(tp);
7922 free_irq(tp->pdev->irq, dev);
7924 err = request_irq(tp->pdev->irq, tg3_test_isr,
7925 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7929 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7930 tg3_enable_ints(tp);
7932 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7935 for (i = 0; i < 5; i++) {
7936 u32 int_mbox, misc_host_ctrl;
7938 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7940 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7942 if ((int_mbox != 0) ||
7943 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7951 tg3_disable_ints(tp);
7953 free_irq(tp->pdev->irq, dev);
7955 err = tg3_request_irq(tp);
7966 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7967 * successfully restored
7969 static int tg3_test_msi(struct tg3 *tp)
7971 struct net_device *dev = tp->dev;
7975 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7978 /* Turn off SERR reporting in case MSI terminates with Master
7981 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7982 pci_write_config_word(tp->pdev, PCI_COMMAND,
7983 pci_cmd & ~PCI_COMMAND_SERR);
7985 err = tg3_test_interrupt(tp);
7987 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7992 /* other failures */
7996 /* MSI test failed, go back to INTx mode */
7997 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7998 "switching to INTx mode. Please report this failure to "
7999 "the PCI maintainer and include system chipset information.\n",
8002 free_irq(tp->pdev->irq, dev);
8003 pci_disable_msi(tp->pdev);
8005 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8007 err = tg3_request_irq(tp);
8011 /* Need to reset the chip because the MSI cycle may have terminated
8012 * with Master Abort.
8014 tg3_full_lock(tp, 1);
8016 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8017 err = tg3_init_hw(tp, 1);
8019 tg3_full_unlock(tp);
8022 free_irq(tp->pdev->irq, dev);
8027 static int tg3_open(struct net_device *dev)
8029 struct tg3 *tp = netdev_priv(dev);
8032 netif_carrier_off(tp->dev);
8034 err = tg3_set_power_state(tp, PCI_D0);
8038 tg3_full_lock(tp, 0);
8040 tg3_disable_ints(tp);
8041 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8043 tg3_full_unlock(tp);
8045 /* The placement of this call is tied
8046 * to the setup and use of Host TX descriptors.
8048 err = tg3_alloc_consistent(tp);
8052 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8053 /* All MSI supporting chips should support tagged
8054 * status. Assert that this is the case.
8056 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8057 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8058 "Not using MSI.\n", tp->dev->name);
8059 } else if (pci_enable_msi(tp->pdev) == 0) {
8062 msi_mode = tr32(MSGINT_MODE);
8063 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8064 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8067 err = tg3_request_irq(tp);
8070 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8071 pci_disable_msi(tp->pdev);
8072 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8074 tg3_free_consistent(tp);
8078 napi_enable(&tp->napi);
8080 tg3_full_lock(tp, 0);
8082 err = tg3_init_hw(tp, 1);
8084 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8087 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8088 tp->timer_offset = HZ;
8090 tp->timer_offset = HZ / 10;
8092 BUG_ON(tp->timer_offset > HZ);
8093 tp->timer_counter = tp->timer_multiplier =
8094 (HZ / tp->timer_offset);
8095 tp->asf_counter = tp->asf_multiplier =
8096 ((HZ / tp->timer_offset) * 2);
8098 init_timer(&tp->timer);
8099 tp->timer.expires = jiffies + tp->timer_offset;
8100 tp->timer.data = (unsigned long) tp;
8101 tp->timer.function = tg3_timer;
8104 tg3_full_unlock(tp);
8107 napi_disable(&tp->napi);
8108 free_irq(tp->pdev->irq, dev);
8109 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8110 pci_disable_msi(tp->pdev);
8111 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8113 tg3_free_consistent(tp);
8117 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8118 err = tg3_test_msi(tp);
8121 tg3_full_lock(tp, 0);
8123 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8124 pci_disable_msi(tp->pdev);
8125 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8127 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8129 tg3_free_consistent(tp);
8131 tg3_full_unlock(tp);
8133 napi_disable(&tp->napi);
8138 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8139 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8140 u32 val = tr32(PCIE_TRANSACTION_CFG);
8142 tw32(PCIE_TRANSACTION_CFG,
8143 val | PCIE_TRANS_CFG_1SHOT_MSI);
8150 tg3_full_lock(tp, 0);
8152 add_timer(&tp->timer);
8153 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8154 tg3_enable_ints(tp);
8156 tg3_full_unlock(tp);
8158 netif_start_queue(dev);
8164 /*static*/ void tg3_dump_state(struct tg3 *tp)
8166 u32 val32, val32_2, val32_3, val32_4, val32_5;
8170 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8171 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8172 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8176 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8177 tr32(MAC_MODE), tr32(MAC_STATUS));
8178 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8179 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8180 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8181 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8182 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8183 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8185 /* Send data initiator control block */
8186 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8187 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8188 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8189 tr32(SNDDATAI_STATSCTRL));
8191 /* Send data completion control block */
8192 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8194 /* Send BD ring selector block */
8195 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8196 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8198 /* Send BD initiator control block */
8199 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8200 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8202 /* Send BD completion control block */
8203 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8205 /* Receive list placement control block */
8206 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8207 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8208 printk(" RCVLPC_STATSCTRL[%08x]\n",
8209 tr32(RCVLPC_STATSCTRL));
8211 /* Receive data and receive BD initiator control block */
8212 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8213 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8215 /* Receive data completion control block */
8216 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8219 /* Receive BD initiator control block */
8220 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8221 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8223 /* Receive BD completion control block */
8224 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8225 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8227 /* Receive list selector control block */
8228 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8229 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8231 /* Mbuf cluster free block */
8232 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8233 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8235 /* Host coalescing control block */
8236 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8237 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8238 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8239 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8240 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8241 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8242 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8243 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8244 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8245 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8246 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8247 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8249 /* Memory arbiter control block */
8250 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8251 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8253 /* Buffer manager control block */
8254 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8255 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8256 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8257 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8258 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8259 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8260 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8261 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8263 /* Read DMA control block */
8264 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8265 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8267 /* Write DMA control block */
8268 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8269 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8271 /* DMA completion block */
8272 printk("DEBUG: DMAC_MODE[%08x]\n",
8276 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8277 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8278 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8279 tr32(GRC_LOCAL_CTRL));
8282 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8283 tr32(RCVDBDI_JUMBO_BD + 0x0),
8284 tr32(RCVDBDI_JUMBO_BD + 0x4),
8285 tr32(RCVDBDI_JUMBO_BD + 0x8),
8286 tr32(RCVDBDI_JUMBO_BD + 0xc));
8287 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8288 tr32(RCVDBDI_STD_BD + 0x0),
8289 tr32(RCVDBDI_STD_BD + 0x4),
8290 tr32(RCVDBDI_STD_BD + 0x8),
8291 tr32(RCVDBDI_STD_BD + 0xc));
8292 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8293 tr32(RCVDBDI_MINI_BD + 0x0),
8294 tr32(RCVDBDI_MINI_BD + 0x4),
8295 tr32(RCVDBDI_MINI_BD + 0x8),
8296 tr32(RCVDBDI_MINI_BD + 0xc));
8298 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8299 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8300 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8301 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8302 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8303 val32, val32_2, val32_3, val32_4);
8305 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8306 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8307 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8308 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8309 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8310 val32, val32_2, val32_3, val32_4);
8312 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8313 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8314 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8315 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8316 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8317 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8318 val32, val32_2, val32_3, val32_4, val32_5);
8320 /* SW status block */
8321 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8322 tp->hw_status->status,
8323 tp->hw_status->status_tag,
8324 tp->hw_status->rx_jumbo_consumer,
8325 tp->hw_status->rx_consumer,
8326 tp->hw_status->rx_mini_consumer,
8327 tp->hw_status->idx[0].rx_producer,
8328 tp->hw_status->idx[0].tx_consumer);
8330 /* SW statistics block */
8331 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8332 ((u32 *)tp->hw_stats)[0],
8333 ((u32 *)tp->hw_stats)[1],
8334 ((u32 *)tp->hw_stats)[2],
8335 ((u32 *)tp->hw_stats)[3]);
8338 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8339 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8340 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8341 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8342 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8344 /* NIC side send descriptors. */
8345 for (i = 0; i < 6; i++) {
8348 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8349 + (i * sizeof(struct tg3_tx_buffer_desc));
8350 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8352 readl(txd + 0x0), readl(txd + 0x4),
8353 readl(txd + 0x8), readl(txd + 0xc));
8356 /* NIC side RX descriptors. */
8357 for (i = 0; i < 6; i++) {
8360 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8361 + (i * sizeof(struct tg3_rx_buffer_desc));
8362 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8364 readl(rxd + 0x0), readl(rxd + 0x4),
8365 readl(rxd + 0x8), readl(rxd + 0xc));
8366 rxd += (4 * sizeof(u32));
8367 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8369 readl(rxd + 0x0), readl(rxd + 0x4),
8370 readl(rxd + 0x8), readl(rxd + 0xc));
8373 for (i = 0; i < 6; i++) {
8376 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8377 + (i * sizeof(struct tg3_rx_buffer_desc));
8378 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8380 readl(rxd + 0x0), readl(rxd + 0x4),
8381 readl(rxd + 0x8), readl(rxd + 0xc));
8382 rxd += (4 * sizeof(u32));
8383 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8385 readl(rxd + 0x0), readl(rxd + 0x4),
8386 readl(rxd + 0x8), readl(rxd + 0xc));
8391 static struct net_device_stats *tg3_get_stats(struct net_device *);
8392 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8394 static int tg3_close(struct net_device *dev)
8396 struct tg3 *tp = netdev_priv(dev);
8398 napi_disable(&tp->napi);
8399 cancel_work_sync(&tp->reset_task);
8401 netif_stop_queue(dev);
8403 del_timer_sync(&tp->timer);
8405 tg3_full_lock(tp, 1);
8410 tg3_disable_ints(tp);
8412 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8414 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8416 tg3_full_unlock(tp);
8418 free_irq(tp->pdev->irq, dev);
8419 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8420 pci_disable_msi(tp->pdev);
8421 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8424 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8425 sizeof(tp->net_stats_prev));
8426 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8427 sizeof(tp->estats_prev));
8429 tg3_free_consistent(tp);
8431 tg3_set_power_state(tp, PCI_D3hot);
8433 netif_carrier_off(tp->dev);
8438 static inline unsigned long get_stat64(tg3_stat64_t *val)
8442 #if (BITS_PER_LONG == 32)
8445 ret = ((u64)val->high << 32) | ((u64)val->low);
8450 static inline u64 get_estat64(tg3_stat64_t *val)
8452 return ((u64)val->high << 32) | ((u64)val->low);
8455 static unsigned long calc_crc_errors(struct tg3 *tp)
8457 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8459 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8460 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8461 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8464 spin_lock_bh(&tp->lock);
8465 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8466 tg3_writephy(tp, MII_TG3_TEST1,
8467 val | MII_TG3_TEST1_CRC_EN);
8468 tg3_readphy(tp, 0x14, &val);
8471 spin_unlock_bh(&tp->lock);
8473 tp->phy_crc_errors += val;
8475 return tp->phy_crc_errors;
8478 return get_stat64(&hw_stats->rx_fcs_errors);
8481 #define ESTAT_ADD(member) \
8482 estats->member = old_estats->member + \
8483 get_estat64(&hw_stats->member)
8485 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8487 struct tg3_ethtool_stats *estats = &tp->estats;
8488 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8489 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8494 ESTAT_ADD(rx_octets);
8495 ESTAT_ADD(rx_fragments);
8496 ESTAT_ADD(rx_ucast_packets);
8497 ESTAT_ADD(rx_mcast_packets);
8498 ESTAT_ADD(rx_bcast_packets);
8499 ESTAT_ADD(rx_fcs_errors);
8500 ESTAT_ADD(rx_align_errors);
8501 ESTAT_ADD(rx_xon_pause_rcvd);
8502 ESTAT_ADD(rx_xoff_pause_rcvd);
8503 ESTAT_ADD(rx_mac_ctrl_rcvd);
8504 ESTAT_ADD(rx_xoff_entered);
8505 ESTAT_ADD(rx_frame_too_long_errors);
8506 ESTAT_ADD(rx_jabbers);
8507 ESTAT_ADD(rx_undersize_packets);
8508 ESTAT_ADD(rx_in_length_errors);
8509 ESTAT_ADD(rx_out_length_errors);
8510 ESTAT_ADD(rx_64_or_less_octet_packets);
8511 ESTAT_ADD(rx_65_to_127_octet_packets);
8512 ESTAT_ADD(rx_128_to_255_octet_packets);
8513 ESTAT_ADD(rx_256_to_511_octet_packets);
8514 ESTAT_ADD(rx_512_to_1023_octet_packets);
8515 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8516 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8517 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8518 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8519 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8521 ESTAT_ADD(tx_octets);
8522 ESTAT_ADD(tx_collisions);
8523 ESTAT_ADD(tx_xon_sent);
8524 ESTAT_ADD(tx_xoff_sent);
8525 ESTAT_ADD(tx_flow_control);
8526 ESTAT_ADD(tx_mac_errors);
8527 ESTAT_ADD(tx_single_collisions);
8528 ESTAT_ADD(tx_mult_collisions);
8529 ESTAT_ADD(tx_deferred);
8530 ESTAT_ADD(tx_excessive_collisions);
8531 ESTAT_ADD(tx_late_collisions);
8532 ESTAT_ADD(tx_collide_2times);
8533 ESTAT_ADD(tx_collide_3times);
8534 ESTAT_ADD(tx_collide_4times);
8535 ESTAT_ADD(tx_collide_5times);
8536 ESTAT_ADD(tx_collide_6times);
8537 ESTAT_ADD(tx_collide_7times);
8538 ESTAT_ADD(tx_collide_8times);
8539 ESTAT_ADD(tx_collide_9times);
8540 ESTAT_ADD(tx_collide_10times);
8541 ESTAT_ADD(tx_collide_11times);
8542 ESTAT_ADD(tx_collide_12times);
8543 ESTAT_ADD(tx_collide_13times);
8544 ESTAT_ADD(tx_collide_14times);
8545 ESTAT_ADD(tx_collide_15times);
8546 ESTAT_ADD(tx_ucast_packets);
8547 ESTAT_ADD(tx_mcast_packets);
8548 ESTAT_ADD(tx_bcast_packets);
8549 ESTAT_ADD(tx_carrier_sense_errors);
8550 ESTAT_ADD(tx_discards);
8551 ESTAT_ADD(tx_errors);
8553 ESTAT_ADD(dma_writeq_full);
8554 ESTAT_ADD(dma_write_prioq_full);
8555 ESTAT_ADD(rxbds_empty);
8556 ESTAT_ADD(rx_discards);
8557 ESTAT_ADD(rx_errors);
8558 ESTAT_ADD(rx_threshold_hit);
8560 ESTAT_ADD(dma_readq_full);
8561 ESTAT_ADD(dma_read_prioq_full);
8562 ESTAT_ADD(tx_comp_queue_full);
8564 ESTAT_ADD(ring_set_send_prod_index);
8565 ESTAT_ADD(ring_status_update);
8566 ESTAT_ADD(nic_irqs);
8567 ESTAT_ADD(nic_avoided_irqs);
8568 ESTAT_ADD(nic_tx_threshold_hit);
8573 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8575 struct tg3 *tp = netdev_priv(dev);
8576 struct net_device_stats *stats = &tp->net_stats;
8577 struct net_device_stats *old_stats = &tp->net_stats_prev;
8578 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8583 stats->rx_packets = old_stats->rx_packets +
8584 get_stat64(&hw_stats->rx_ucast_packets) +
8585 get_stat64(&hw_stats->rx_mcast_packets) +
8586 get_stat64(&hw_stats->rx_bcast_packets);
8588 stats->tx_packets = old_stats->tx_packets +
8589 get_stat64(&hw_stats->tx_ucast_packets) +
8590 get_stat64(&hw_stats->tx_mcast_packets) +
8591 get_stat64(&hw_stats->tx_bcast_packets);
8593 stats->rx_bytes = old_stats->rx_bytes +
8594 get_stat64(&hw_stats->rx_octets);
8595 stats->tx_bytes = old_stats->tx_bytes +
8596 get_stat64(&hw_stats->tx_octets);
8598 stats->rx_errors = old_stats->rx_errors +
8599 get_stat64(&hw_stats->rx_errors);
8600 stats->tx_errors = old_stats->tx_errors +
8601 get_stat64(&hw_stats->tx_errors) +
8602 get_stat64(&hw_stats->tx_mac_errors) +
8603 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8604 get_stat64(&hw_stats->tx_discards);
8606 stats->multicast = old_stats->multicast +
8607 get_stat64(&hw_stats->rx_mcast_packets);
8608 stats->collisions = old_stats->collisions +
8609 get_stat64(&hw_stats->tx_collisions);
8611 stats->rx_length_errors = old_stats->rx_length_errors +
8612 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8613 get_stat64(&hw_stats->rx_undersize_packets);
8615 stats->rx_over_errors = old_stats->rx_over_errors +
8616 get_stat64(&hw_stats->rxbds_empty);
8617 stats->rx_frame_errors = old_stats->rx_frame_errors +
8618 get_stat64(&hw_stats->rx_align_errors);
8619 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8620 get_stat64(&hw_stats->tx_discards);
8621 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8622 get_stat64(&hw_stats->tx_carrier_sense_errors);
8624 stats->rx_crc_errors = old_stats->rx_crc_errors +
8625 calc_crc_errors(tp);
8627 stats->rx_missed_errors = old_stats->rx_missed_errors +
8628 get_stat64(&hw_stats->rx_discards);
8633 static inline u32 calc_crc(unsigned char *buf, int len)
8641 for (j = 0; j < len; j++) {
8644 for (k = 0; k < 8; k++) {
8658 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8660 /* accept or reject all multicast frames */
8661 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8662 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8663 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8664 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8667 static void __tg3_set_rx_mode(struct net_device *dev)
8669 struct tg3 *tp = netdev_priv(dev);
8672 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8673 RX_MODE_KEEP_VLAN_TAG);
8675 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8678 #if TG3_VLAN_TAG_USED
8680 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8681 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8683 /* By definition, VLAN is disabled always in this
8686 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8687 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8690 if (dev->flags & IFF_PROMISC) {
8691 /* Promiscuous mode. */
8692 rx_mode |= RX_MODE_PROMISC;
8693 } else if (dev->flags & IFF_ALLMULTI) {
8694 /* Accept all multicast. */
8695 tg3_set_multi (tp, 1);
8696 } else if (dev->mc_count < 1) {
8697 /* Reject all multicast. */
8698 tg3_set_multi (tp, 0);
8700 /* Accept one or more multicast(s). */
8701 struct dev_mc_list *mclist;
8703 u32 mc_filter[4] = { 0, };
8708 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8709 i++, mclist = mclist->next) {
8711 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8713 regidx = (bit & 0x60) >> 5;
8715 mc_filter[regidx] |= (1 << bit);
8718 tw32(MAC_HASH_REG_0, mc_filter[0]);
8719 tw32(MAC_HASH_REG_1, mc_filter[1]);
8720 tw32(MAC_HASH_REG_2, mc_filter[2]);
8721 tw32(MAC_HASH_REG_3, mc_filter[3]);
8724 if (rx_mode != tp->rx_mode) {
8725 tp->rx_mode = rx_mode;
8726 tw32_f(MAC_RX_MODE, rx_mode);
8731 static void tg3_set_rx_mode(struct net_device *dev)
8733 struct tg3 *tp = netdev_priv(dev);
8735 if (!netif_running(dev))
8738 tg3_full_lock(tp, 0);
8739 __tg3_set_rx_mode(dev);
8740 tg3_full_unlock(tp);
8743 #define TG3_REGDUMP_LEN (32 * 1024)
8745 static int tg3_get_regs_len(struct net_device *dev)
8747 return TG3_REGDUMP_LEN;
8750 static void tg3_get_regs(struct net_device *dev,
8751 struct ethtool_regs *regs, void *_p)
8754 struct tg3 *tp = netdev_priv(dev);
8760 memset(p, 0, TG3_REGDUMP_LEN);
8762 if (tp->link_config.phy_is_low_power)
8765 tg3_full_lock(tp, 0);
8767 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8768 #define GET_REG32_LOOP(base,len) \
8769 do { p = (u32 *)(orig_p + (base)); \
8770 for (i = 0; i < len; i += 4) \
8771 __GET_REG32((base) + i); \
8773 #define GET_REG32_1(reg) \
8774 do { p = (u32 *)(orig_p + (reg)); \
8775 __GET_REG32((reg)); \
8778 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8779 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8780 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8781 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8782 GET_REG32_1(SNDDATAC_MODE);
8783 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8784 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8785 GET_REG32_1(SNDBDC_MODE);
8786 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8787 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8788 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8789 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8790 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8791 GET_REG32_1(RCVDCC_MODE);
8792 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8793 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8794 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8795 GET_REG32_1(MBFREE_MODE);
8796 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8797 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8798 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8799 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8800 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8801 GET_REG32_1(RX_CPU_MODE);
8802 GET_REG32_1(RX_CPU_STATE);
8803 GET_REG32_1(RX_CPU_PGMCTR);
8804 GET_REG32_1(RX_CPU_HWBKPT);
8805 GET_REG32_1(TX_CPU_MODE);
8806 GET_REG32_1(TX_CPU_STATE);
8807 GET_REG32_1(TX_CPU_PGMCTR);
8808 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8809 GET_REG32_LOOP(FTQ_RESET, 0x120);
8810 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8811 GET_REG32_1(DMAC_MODE);
8812 GET_REG32_LOOP(GRC_MODE, 0x4c);
8813 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8814 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8817 #undef GET_REG32_LOOP
8820 tg3_full_unlock(tp);
8823 static int tg3_get_eeprom_len(struct net_device *dev)
8825 struct tg3 *tp = netdev_priv(dev);
8827 return tp->nvram_size;
8830 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8831 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8832 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8834 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8836 struct tg3 *tp = netdev_priv(dev);
8839 u32 i, offset, len, b_offset, b_count;
8842 if (tp->link_config.phy_is_low_power)
8845 offset = eeprom->offset;
8849 eeprom->magic = TG3_EEPROM_MAGIC;
8852 /* adjustments to start on required 4 byte boundary */
8853 b_offset = offset & 3;
8854 b_count = 4 - b_offset;
8855 if (b_count > len) {
8856 /* i.e. offset=1 len=2 */
8859 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8862 memcpy(data, ((char*)&val) + b_offset, b_count);
8865 eeprom->len += b_count;
8868 /* read bytes upto the last 4 byte boundary */
8869 pd = &data[eeprom->len];
8870 for (i = 0; i < (len - (len & 3)); i += 4) {
8871 ret = tg3_nvram_read_le(tp, offset + i, &val);
8876 memcpy(pd + i, &val, 4);
8881 /* read last bytes not ending on 4 byte boundary */
8882 pd = &data[eeprom->len];
8884 b_offset = offset + len - b_count;
8885 ret = tg3_nvram_read_le(tp, b_offset, &val);
8888 memcpy(pd, &val, b_count);
8889 eeprom->len += b_count;
8894 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8896 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8898 struct tg3 *tp = netdev_priv(dev);
8900 u32 offset, len, b_offset, odd_len;
8904 if (tp->link_config.phy_is_low_power)
8907 if (eeprom->magic != TG3_EEPROM_MAGIC)
8910 offset = eeprom->offset;
8913 if ((b_offset = (offset & 3))) {
8914 /* adjustments to start on required 4 byte boundary */
8915 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8926 /* adjustments to end on required 4 byte boundary */
8928 len = (len + 3) & ~3;
8929 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8935 if (b_offset || odd_len) {
8936 buf = kmalloc(len, GFP_KERNEL);
8940 memcpy(buf, &start, 4);
8942 memcpy(buf+len-4, &end, 4);
8943 memcpy(buf + b_offset, data, eeprom->len);
8946 ret = tg3_nvram_write_block(tp, offset, len, buf);
8954 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8956 struct tg3 *tp = netdev_priv(dev);
8958 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8959 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8961 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8964 cmd->supported = (SUPPORTED_Autoneg);
8966 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8967 cmd->supported |= (SUPPORTED_1000baseT_Half |
8968 SUPPORTED_1000baseT_Full);
8970 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8971 cmd->supported |= (SUPPORTED_100baseT_Half |
8972 SUPPORTED_100baseT_Full |
8973 SUPPORTED_10baseT_Half |
8974 SUPPORTED_10baseT_Full |
8976 cmd->port = PORT_TP;
8978 cmd->supported |= SUPPORTED_FIBRE;
8979 cmd->port = PORT_FIBRE;
8982 cmd->advertising = tp->link_config.advertising;
8983 if (netif_running(dev)) {
8984 cmd->speed = tp->link_config.active_speed;
8985 cmd->duplex = tp->link_config.active_duplex;
8987 cmd->phy_address = PHY_ADDR;
8988 cmd->transceiver = 0;
8989 cmd->autoneg = tp->link_config.autoneg;
8995 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8997 struct tg3 *tp = netdev_priv(dev);
8999 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9000 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9002 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9005 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9006 /* These are the only valid advertisement bits allowed. */
9007 if (cmd->autoneg == AUTONEG_ENABLE &&
9008 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9009 ADVERTISED_1000baseT_Full |
9010 ADVERTISED_Autoneg |
9013 /* Fiber can only do SPEED_1000. */
9014 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9015 (cmd->speed != SPEED_1000))
9017 /* Copper cannot force SPEED_1000. */
9018 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9019 (cmd->speed == SPEED_1000))
9021 else if ((cmd->speed == SPEED_1000) &&
9022 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9025 tg3_full_lock(tp, 0);
9027 tp->link_config.autoneg = cmd->autoneg;
9028 if (cmd->autoneg == AUTONEG_ENABLE) {
9029 tp->link_config.advertising = (cmd->advertising |
9030 ADVERTISED_Autoneg);
9031 tp->link_config.speed = SPEED_INVALID;
9032 tp->link_config.duplex = DUPLEX_INVALID;
9034 tp->link_config.advertising = 0;
9035 tp->link_config.speed = cmd->speed;
9036 tp->link_config.duplex = cmd->duplex;
9039 tp->link_config.orig_speed = tp->link_config.speed;
9040 tp->link_config.orig_duplex = tp->link_config.duplex;
9041 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9043 if (netif_running(dev))
9044 tg3_setup_phy(tp, 1);
9046 tg3_full_unlock(tp);
9051 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9053 struct tg3 *tp = netdev_priv(dev);
9055 strcpy(info->driver, DRV_MODULE_NAME);
9056 strcpy(info->version, DRV_MODULE_VERSION);
9057 strcpy(info->fw_version, tp->fw_ver);
9058 strcpy(info->bus_info, pci_name(tp->pdev));
9061 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9063 struct tg3 *tp = netdev_priv(dev);
9065 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9066 device_can_wakeup(&tp->pdev->dev))
9067 wol->supported = WAKE_MAGIC;
9071 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9072 wol->wolopts = WAKE_MAGIC;
9073 memset(&wol->sopass, 0, sizeof(wol->sopass));
9076 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9078 struct tg3 *tp = netdev_priv(dev);
9079 struct device *dp = &tp->pdev->dev;
9081 if (wol->wolopts & ~WAKE_MAGIC)
9083 if ((wol->wolopts & WAKE_MAGIC) &&
9084 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9087 spin_lock_bh(&tp->lock);
9088 if (wol->wolopts & WAKE_MAGIC) {
9089 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9090 device_set_wakeup_enable(dp, true);
9092 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9093 device_set_wakeup_enable(dp, false);
9095 spin_unlock_bh(&tp->lock);
9100 static u32 tg3_get_msglevel(struct net_device *dev)
9102 struct tg3 *tp = netdev_priv(dev);
9103 return tp->msg_enable;
9106 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9108 struct tg3 *tp = netdev_priv(dev);
9109 tp->msg_enable = value;
9112 static int tg3_set_tso(struct net_device *dev, u32 value)
9114 struct tg3 *tp = netdev_priv(dev);
9116 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9121 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9122 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9124 dev->features |= NETIF_F_TSO6;
9125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9126 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9127 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9129 dev->features |= NETIF_F_TSO_ECN;
9131 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9133 return ethtool_op_set_tso(dev, value);
9136 static int tg3_nway_reset(struct net_device *dev)
9138 struct tg3 *tp = netdev_priv(dev);
9141 if (!netif_running(dev))
9144 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9147 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9148 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9150 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9154 spin_lock_bh(&tp->lock);
9156 tg3_readphy(tp, MII_BMCR, &bmcr);
9157 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9158 ((bmcr & BMCR_ANENABLE) ||
9159 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9160 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9164 spin_unlock_bh(&tp->lock);
9170 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9172 struct tg3 *tp = netdev_priv(dev);
9174 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9175 ering->rx_mini_max_pending = 0;
9176 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9177 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9179 ering->rx_jumbo_max_pending = 0;
9181 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9183 ering->rx_pending = tp->rx_pending;
9184 ering->rx_mini_pending = 0;
9185 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9186 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9188 ering->rx_jumbo_pending = 0;
9190 ering->tx_pending = tp->tx_pending;
9193 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9195 struct tg3 *tp = netdev_priv(dev);
9196 int irq_sync = 0, err = 0;
9198 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9199 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9200 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9201 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9202 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9203 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9206 if (netif_running(dev)) {
9212 tg3_full_lock(tp, irq_sync);
9214 tp->rx_pending = ering->rx_pending;
9216 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9217 tp->rx_pending > 63)
9218 tp->rx_pending = 63;
9219 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9220 tp->tx_pending = ering->tx_pending;
9222 if (netif_running(dev)) {
9223 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9224 err = tg3_restart_hw(tp, 1);
9226 tg3_netif_start(tp);
9229 tg3_full_unlock(tp);
9231 if (irq_sync && !err)
9237 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9239 struct tg3 *tp = netdev_priv(dev);
9241 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9243 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9244 epause->rx_pause = 1;
9246 epause->rx_pause = 0;
9248 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9249 epause->tx_pause = 1;
9251 epause->tx_pause = 0;
9254 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9256 struct tg3 *tp = netdev_priv(dev);
9259 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9260 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9263 if (epause->autoneg) {
9265 struct phy_device *phydev;
9267 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9269 if (epause->rx_pause) {
9270 if (epause->tx_pause)
9271 newadv = ADVERTISED_Pause;
9273 newadv = ADVERTISED_Pause |
9274 ADVERTISED_Asym_Pause;
9275 } else if (epause->tx_pause) {
9276 newadv = ADVERTISED_Asym_Pause;
9280 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9281 u32 oldadv = phydev->advertising &
9283 ADVERTISED_Asym_Pause);
9284 if (oldadv != newadv) {
9285 phydev->advertising &=
9286 ~(ADVERTISED_Pause |
9287 ADVERTISED_Asym_Pause);
9288 phydev->advertising |= newadv;
9289 err = phy_start_aneg(phydev);
9292 tp->link_config.advertising &=
9293 ~(ADVERTISED_Pause |
9294 ADVERTISED_Asym_Pause);
9295 tp->link_config.advertising |= newadv;
9298 if (epause->rx_pause)
9299 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9301 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9303 if (epause->tx_pause)
9304 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9306 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9308 if (netif_running(dev))
9309 tg3_setup_flow_control(tp, 0, 0);
9314 if (netif_running(dev)) {
9319 tg3_full_lock(tp, irq_sync);
9321 if (epause->autoneg)
9322 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9324 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9325 if (epause->rx_pause)
9326 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9328 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9329 if (epause->tx_pause)
9330 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9332 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9334 if (netif_running(dev)) {
9335 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9336 err = tg3_restart_hw(tp, 1);
9338 tg3_netif_start(tp);
9341 tg3_full_unlock(tp);
9347 static u32 tg3_get_rx_csum(struct net_device *dev)
9349 struct tg3 *tp = netdev_priv(dev);
9350 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9353 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9355 struct tg3 *tp = netdev_priv(dev);
9357 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9363 spin_lock_bh(&tp->lock);
9365 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9367 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9368 spin_unlock_bh(&tp->lock);
9373 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9375 struct tg3 *tp = netdev_priv(dev);
9377 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9386 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9387 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9388 ethtool_op_set_tx_ipv6_csum(dev, data);
9390 ethtool_op_set_tx_csum(dev, data);
9395 static int tg3_get_sset_count (struct net_device *dev, int sset)
9399 return TG3_NUM_TEST;
9401 return TG3_NUM_STATS;
9407 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9409 switch (stringset) {
9411 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
9414 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
9417 WARN_ON(1); /* we need a WARN() */
9422 static int tg3_phys_id(struct net_device *dev, u32 data)
9424 struct tg3 *tp = netdev_priv(dev);
9427 if (!netif_running(tp->dev))
9431 data = UINT_MAX / 2;
9433 for (i = 0; i < (data * 2); i++) {
9435 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9436 LED_CTRL_1000MBPS_ON |
9437 LED_CTRL_100MBPS_ON |
9438 LED_CTRL_10MBPS_ON |
9439 LED_CTRL_TRAFFIC_OVERRIDE |
9440 LED_CTRL_TRAFFIC_BLINK |
9441 LED_CTRL_TRAFFIC_LED);
9444 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9445 LED_CTRL_TRAFFIC_OVERRIDE);
9447 if (msleep_interruptible(500))
9450 tw32(MAC_LED_CTRL, tp->led_ctrl);
9454 static void tg3_get_ethtool_stats (struct net_device *dev,
9455 struct ethtool_stats *estats, u64 *tmp_stats)
9457 struct tg3 *tp = netdev_priv(dev);
9458 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9461 #define NVRAM_TEST_SIZE 0x100
9462 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9463 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9464 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9465 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9466 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9468 static int tg3_test_nvram(struct tg3 *tp)
9472 int i, j, k, err = 0, size;
9474 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9477 if (magic == TG3_EEPROM_MAGIC)
9478 size = NVRAM_TEST_SIZE;
9479 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9480 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9481 TG3_EEPROM_SB_FORMAT_1) {
9482 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9483 case TG3_EEPROM_SB_REVISION_0:
9484 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9486 case TG3_EEPROM_SB_REVISION_2:
9487 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9489 case TG3_EEPROM_SB_REVISION_3:
9490 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9497 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9498 size = NVRAM_SELFBOOT_HW_SIZE;
9502 buf = kmalloc(size, GFP_KERNEL);
9507 for (i = 0, j = 0; i < size; i += 4, j++) {
9508 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9514 /* Selfboot format */
9515 magic = swab32(le32_to_cpu(buf[0]));
9516 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9517 TG3_EEPROM_MAGIC_FW) {
9518 u8 *buf8 = (u8 *) buf, csum8 = 0;
9520 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9521 TG3_EEPROM_SB_REVISION_2) {
9522 /* For rev 2, the csum doesn't include the MBA. */
9523 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9525 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9528 for (i = 0; i < size; i++)
9541 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9542 TG3_EEPROM_MAGIC_HW) {
9543 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9544 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9545 u8 *buf8 = (u8 *) buf;
9547 /* Separate the parity bits and the data bytes. */
9548 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9549 if ((i == 0) || (i == 8)) {
9553 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9554 parity[k++] = buf8[i] & msk;
9561 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9562 parity[k++] = buf8[i] & msk;
9565 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9566 parity[k++] = buf8[i] & msk;
9569 data[j++] = buf8[i];
9573 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9574 u8 hw8 = hweight8(data[i]);
9576 if ((hw8 & 0x1) && parity[i])
9578 else if (!(hw8 & 0x1) && !parity[i])
9585 /* Bootstrap checksum at offset 0x10 */
9586 csum = calc_crc((unsigned char *) buf, 0x10);
9587 if(csum != le32_to_cpu(buf[0x10/4]))
9590 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9591 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9592 if (csum != le32_to_cpu(buf[0xfc/4]))
9602 #define TG3_SERDES_TIMEOUT_SEC 2
9603 #define TG3_COPPER_TIMEOUT_SEC 6
9605 static int tg3_test_link(struct tg3 *tp)
9609 if (!netif_running(tp->dev))
9612 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9613 max = TG3_SERDES_TIMEOUT_SEC;
9615 max = TG3_COPPER_TIMEOUT_SEC;
9617 for (i = 0; i < max; i++) {
9618 if (netif_carrier_ok(tp->dev))
9621 if (msleep_interruptible(1000))
9628 /* Only test the commonly used registers */
9629 static int tg3_test_registers(struct tg3 *tp)
9631 int i, is_5705, is_5750;
9632 u32 offset, read_mask, write_mask, val, save_val, read_val;
9636 #define TG3_FL_5705 0x1
9637 #define TG3_FL_NOT_5705 0x2
9638 #define TG3_FL_NOT_5788 0x4
9639 #define TG3_FL_NOT_5750 0x8
9643 /* MAC Control Registers */
9644 { MAC_MODE, TG3_FL_NOT_5705,
9645 0x00000000, 0x00ef6f8c },
9646 { MAC_MODE, TG3_FL_5705,
9647 0x00000000, 0x01ef6b8c },
9648 { MAC_STATUS, TG3_FL_NOT_5705,
9649 0x03800107, 0x00000000 },
9650 { MAC_STATUS, TG3_FL_5705,
9651 0x03800100, 0x00000000 },
9652 { MAC_ADDR_0_HIGH, 0x0000,
9653 0x00000000, 0x0000ffff },
9654 { MAC_ADDR_0_LOW, 0x0000,
9655 0x00000000, 0xffffffff },
9656 { MAC_RX_MTU_SIZE, 0x0000,
9657 0x00000000, 0x0000ffff },
9658 { MAC_TX_MODE, 0x0000,
9659 0x00000000, 0x00000070 },
9660 { MAC_TX_LENGTHS, 0x0000,
9661 0x00000000, 0x00003fff },
9662 { MAC_RX_MODE, TG3_FL_NOT_5705,
9663 0x00000000, 0x000007fc },
9664 { MAC_RX_MODE, TG3_FL_5705,
9665 0x00000000, 0x000007dc },
9666 { MAC_HASH_REG_0, 0x0000,
9667 0x00000000, 0xffffffff },
9668 { MAC_HASH_REG_1, 0x0000,
9669 0x00000000, 0xffffffff },
9670 { MAC_HASH_REG_2, 0x0000,
9671 0x00000000, 0xffffffff },
9672 { MAC_HASH_REG_3, 0x0000,
9673 0x00000000, 0xffffffff },
9675 /* Receive Data and Receive BD Initiator Control Registers. */
9676 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9677 0x00000000, 0xffffffff },
9678 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9679 0x00000000, 0xffffffff },
9680 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9681 0x00000000, 0x00000003 },
9682 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9683 0x00000000, 0xffffffff },
9684 { RCVDBDI_STD_BD+0, 0x0000,
9685 0x00000000, 0xffffffff },
9686 { RCVDBDI_STD_BD+4, 0x0000,
9687 0x00000000, 0xffffffff },
9688 { RCVDBDI_STD_BD+8, 0x0000,
9689 0x00000000, 0xffff0002 },
9690 { RCVDBDI_STD_BD+0xc, 0x0000,
9691 0x00000000, 0xffffffff },
9693 /* Receive BD Initiator Control Registers. */
9694 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9695 0x00000000, 0xffffffff },
9696 { RCVBDI_STD_THRESH, TG3_FL_5705,
9697 0x00000000, 0x000003ff },
9698 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9699 0x00000000, 0xffffffff },
9701 /* Host Coalescing Control Registers. */
9702 { HOSTCC_MODE, TG3_FL_NOT_5705,
9703 0x00000000, 0x00000004 },
9704 { HOSTCC_MODE, TG3_FL_5705,
9705 0x00000000, 0x000000f6 },
9706 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9707 0x00000000, 0xffffffff },
9708 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9709 0x00000000, 0x000003ff },
9710 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9711 0x00000000, 0xffffffff },
9712 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9713 0x00000000, 0x000003ff },
9714 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9715 0x00000000, 0xffffffff },
9716 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9717 0x00000000, 0x000000ff },
9718 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9719 0x00000000, 0xffffffff },
9720 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9721 0x00000000, 0x000000ff },
9722 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9723 0x00000000, 0xffffffff },
9724 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9725 0x00000000, 0xffffffff },
9726 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9727 0x00000000, 0xffffffff },
9728 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9729 0x00000000, 0x000000ff },
9730 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9731 0x00000000, 0xffffffff },
9732 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9733 0x00000000, 0x000000ff },
9734 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9735 0x00000000, 0xffffffff },
9736 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9737 0x00000000, 0xffffffff },
9738 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9739 0x00000000, 0xffffffff },
9740 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9741 0x00000000, 0xffffffff },
9742 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9743 0x00000000, 0xffffffff },
9744 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9745 0xffffffff, 0x00000000 },
9746 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9747 0xffffffff, 0x00000000 },
9749 /* Buffer Manager Control Registers. */
9750 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9751 0x00000000, 0x007fff80 },
9752 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9753 0x00000000, 0x007fffff },
9754 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9755 0x00000000, 0x0000003f },
9756 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9757 0x00000000, 0x000001ff },
9758 { BUFMGR_MB_HIGH_WATER, 0x0000,
9759 0x00000000, 0x000001ff },
9760 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9761 0xffffffff, 0x00000000 },
9762 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9763 0xffffffff, 0x00000000 },
9765 /* Mailbox Registers */
9766 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9767 0x00000000, 0x000001ff },
9768 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9769 0x00000000, 0x000001ff },
9770 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9771 0x00000000, 0x000007ff },
9772 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9773 0x00000000, 0x000001ff },
9775 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9778 is_5705 = is_5750 = 0;
9779 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9781 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9785 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9786 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9789 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9792 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9793 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9796 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9799 offset = (u32) reg_tbl[i].offset;
9800 read_mask = reg_tbl[i].read_mask;
9801 write_mask = reg_tbl[i].write_mask;
9803 /* Save the original register content */
9804 save_val = tr32(offset);
9806 /* Determine the read-only value. */
9807 read_val = save_val & read_mask;
9809 /* Write zero to the register, then make sure the read-only bits
9810 * are not changed and the read/write bits are all zeros.
9816 /* Test the read-only and read/write bits. */
9817 if (((val & read_mask) != read_val) || (val & write_mask))
9820 /* Write ones to all the bits defined by RdMask and WrMask, then
9821 * make sure the read-only bits are not changed and the
9822 * read/write bits are all ones.
9824 tw32(offset, read_mask | write_mask);
9828 /* Test the read-only bits. */
9829 if ((val & read_mask) != read_val)
9832 /* Test the read/write bits. */
9833 if ((val & write_mask) != write_mask)
9836 tw32(offset, save_val);
9842 if (netif_msg_hw(tp))
9843 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9845 tw32(offset, save_val);
9849 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9851 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9855 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9856 for (j = 0; j < len; j += 4) {
9859 tg3_write_mem(tp, offset + j, test_pattern[i]);
9860 tg3_read_mem(tp, offset + j, &val);
9861 if (val != test_pattern[i])
9868 static int tg3_test_memory(struct tg3 *tp)
9870 static struct mem_entry {
9873 } mem_tbl_570x[] = {
9874 { 0x00000000, 0x00b50},
9875 { 0x00002000, 0x1c000},
9876 { 0xffffffff, 0x00000}
9877 }, mem_tbl_5705[] = {
9878 { 0x00000100, 0x0000c},
9879 { 0x00000200, 0x00008},
9880 { 0x00004000, 0x00800},
9881 { 0x00006000, 0x01000},
9882 { 0x00008000, 0x02000},
9883 { 0x00010000, 0x0e000},
9884 { 0xffffffff, 0x00000}
9885 }, mem_tbl_5755[] = {
9886 { 0x00000200, 0x00008},
9887 { 0x00004000, 0x00800},
9888 { 0x00006000, 0x00800},
9889 { 0x00008000, 0x02000},
9890 { 0x00010000, 0x0c000},
9891 { 0xffffffff, 0x00000}
9892 }, mem_tbl_5906[] = {
9893 { 0x00000200, 0x00008},
9894 { 0x00004000, 0x00400},
9895 { 0x00006000, 0x00400},
9896 { 0x00008000, 0x01000},
9897 { 0x00010000, 0x01000},
9898 { 0xffffffff, 0x00000}
9900 struct mem_entry *mem_tbl;
9904 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9909 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9910 mem_tbl = mem_tbl_5755;
9911 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9912 mem_tbl = mem_tbl_5906;
9914 mem_tbl = mem_tbl_5705;
9916 mem_tbl = mem_tbl_570x;
9918 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9919 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9920 mem_tbl[i].len)) != 0)
9927 #define TG3_MAC_LOOPBACK 0
9928 #define TG3_PHY_LOOPBACK 1
9930 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9932 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9934 struct sk_buff *skb, *rx_skb;
9937 int num_pkts, tx_len, rx_len, i, err;
9938 struct tg3_rx_buffer_desc *desc;
9940 if (loopback_mode == TG3_MAC_LOOPBACK) {
9941 /* HW errata - mac loopback fails in some cases on 5780.
9942 * Normal traffic and PHY loopback are not affected by
9945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9948 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9949 MAC_MODE_PORT_INT_LPBACK;
9950 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9951 mac_mode |= MAC_MODE_LINK_POLARITY;
9952 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9953 mac_mode |= MAC_MODE_PORT_MODE_MII;
9955 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9956 tw32(MAC_MODE, mac_mode);
9957 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9960 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9963 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9966 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9967 phytest | MII_TG3_EPHY_SHADOW_EN);
9968 if (!tg3_readphy(tp, 0x1b, &phy))
9969 tg3_writephy(tp, 0x1b, phy & ~0x20);
9970 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9972 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9974 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9976 tg3_phy_toggle_automdix(tp, 0);
9978 tg3_writephy(tp, MII_BMCR, val);
9981 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9983 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9984 mac_mode |= MAC_MODE_PORT_MODE_MII;
9986 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9988 /* reset to prevent losing 1st rx packet intermittently */
9989 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9990 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9992 tw32_f(MAC_RX_MODE, tp->rx_mode);
9994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9995 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9996 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9997 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9998 mac_mode |= MAC_MODE_LINK_POLARITY;
9999 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10000 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10002 tw32(MAC_MODE, mac_mode);
10010 skb = netdev_alloc_skb(tp->dev, tx_len);
10014 tx_data = skb_put(skb, tx_len);
10015 memcpy(tx_data, tp->dev->dev_addr, 6);
10016 memset(tx_data + 6, 0x0, 8);
10018 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10020 for (i = 14; i < tx_len; i++)
10021 tx_data[i] = (u8) (i & 0xff);
10023 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10025 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10030 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10034 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10039 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10041 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10045 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10046 for (i = 0; i < 25; i++) {
10047 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10052 tx_idx = tp->hw_status->idx[0].tx_consumer;
10053 rx_idx = tp->hw_status->idx[0].rx_producer;
10054 if ((tx_idx == tp->tx_prod) &&
10055 (rx_idx == (rx_start_idx + num_pkts)))
10059 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10060 dev_kfree_skb(skb);
10062 if (tx_idx != tp->tx_prod)
10065 if (rx_idx != rx_start_idx + num_pkts)
10068 desc = &tp->rx_rcb[rx_start_idx];
10069 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10070 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10071 if (opaque_key != RXD_OPAQUE_RING_STD)
10074 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10075 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10078 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10079 if (rx_len != tx_len)
10082 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10084 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10085 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10087 for (i = 14; i < tx_len; i++) {
10088 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10093 /* tg3_free_rings will unmap and free the rx_skb */
10098 #define TG3_MAC_LOOPBACK_FAILED 1
10099 #define TG3_PHY_LOOPBACK_FAILED 2
10100 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10101 TG3_PHY_LOOPBACK_FAILED)
10103 static int tg3_test_loopback(struct tg3 *tp)
10108 if (!netif_running(tp->dev))
10109 return TG3_LOOPBACK_FAILED;
10111 err = tg3_reset_hw(tp, 1);
10113 return TG3_LOOPBACK_FAILED;
10115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10121 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10123 /* Wait for up to 40 microseconds to acquire lock. */
10124 for (i = 0; i < 4; i++) {
10125 status = tr32(TG3_CPMU_MUTEX_GNT);
10126 if (status == CPMU_MUTEX_GNT_DRIVER)
10131 if (status != CPMU_MUTEX_GNT_DRIVER)
10132 return TG3_LOOPBACK_FAILED;
10134 /* Turn off link-based power management. */
10135 cpmuctrl = tr32(TG3_CPMU_CTRL);
10136 tw32(TG3_CPMU_CTRL,
10137 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10138 CPMU_CTRL_LINK_AWARE_MODE));
10141 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10142 err |= TG3_MAC_LOOPBACK_FAILED;
10144 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10147 tw32(TG3_CPMU_CTRL, cpmuctrl);
10149 /* Release the mutex */
10150 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10153 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10154 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10155 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10156 err |= TG3_PHY_LOOPBACK_FAILED;
10162 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10165 struct tg3 *tp = netdev_priv(dev);
10167 if (tp->link_config.phy_is_low_power)
10168 tg3_set_power_state(tp, PCI_D0);
10170 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10172 if (tg3_test_nvram(tp) != 0) {
10173 etest->flags |= ETH_TEST_FL_FAILED;
10176 if (tg3_test_link(tp) != 0) {
10177 etest->flags |= ETH_TEST_FL_FAILED;
10180 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10181 int err, err2 = 0, irq_sync = 0;
10183 if (netif_running(dev)) {
10185 tg3_netif_stop(tp);
10189 tg3_full_lock(tp, irq_sync);
10191 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10192 err = tg3_nvram_lock(tp);
10193 tg3_halt_cpu(tp, RX_CPU_BASE);
10194 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10195 tg3_halt_cpu(tp, TX_CPU_BASE);
10197 tg3_nvram_unlock(tp);
10199 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10202 if (tg3_test_registers(tp) != 0) {
10203 etest->flags |= ETH_TEST_FL_FAILED;
10206 if (tg3_test_memory(tp) != 0) {
10207 etest->flags |= ETH_TEST_FL_FAILED;
10210 if ((data[4] = tg3_test_loopback(tp)) != 0)
10211 etest->flags |= ETH_TEST_FL_FAILED;
10213 tg3_full_unlock(tp);
10215 if (tg3_test_interrupt(tp) != 0) {
10216 etest->flags |= ETH_TEST_FL_FAILED;
10220 tg3_full_lock(tp, 0);
10222 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10223 if (netif_running(dev)) {
10224 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10225 err2 = tg3_restart_hw(tp, 1);
10227 tg3_netif_start(tp);
10230 tg3_full_unlock(tp);
10232 if (irq_sync && !err2)
10235 if (tp->link_config.phy_is_low_power)
10236 tg3_set_power_state(tp, PCI_D3hot);
10240 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10242 struct mii_ioctl_data *data = if_mii(ifr);
10243 struct tg3 *tp = netdev_priv(dev);
10246 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10247 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10249 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10254 data->phy_id = PHY_ADDR;
10257 case SIOCGMIIREG: {
10260 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10261 break; /* We have no PHY */
10263 if (tp->link_config.phy_is_low_power)
10266 spin_lock_bh(&tp->lock);
10267 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10268 spin_unlock_bh(&tp->lock);
10270 data->val_out = mii_regval;
10276 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10277 break; /* We have no PHY */
10279 if (!capable(CAP_NET_ADMIN))
10282 if (tp->link_config.phy_is_low_power)
10285 spin_lock_bh(&tp->lock);
10286 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10287 spin_unlock_bh(&tp->lock);
10295 return -EOPNOTSUPP;
10298 #if TG3_VLAN_TAG_USED
10299 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10301 struct tg3 *tp = netdev_priv(dev);
10303 if (netif_running(dev))
10304 tg3_netif_stop(tp);
10306 tg3_full_lock(tp, 0);
10310 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10311 __tg3_set_rx_mode(dev);
10313 if (netif_running(dev))
10314 tg3_netif_start(tp);
10316 tg3_full_unlock(tp);
10320 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10322 struct tg3 *tp = netdev_priv(dev);
10324 memcpy(ec, &tp->coal, sizeof(*ec));
10328 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10330 struct tg3 *tp = netdev_priv(dev);
10331 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10332 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10334 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10335 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10336 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10337 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10338 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10341 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10342 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10343 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10344 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10345 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10346 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10347 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10348 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10349 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10350 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10353 /* No rx interrupts will be generated if both are zero */
10354 if ((ec->rx_coalesce_usecs == 0) &&
10355 (ec->rx_max_coalesced_frames == 0))
10358 /* No tx interrupts will be generated if both are zero */
10359 if ((ec->tx_coalesce_usecs == 0) &&
10360 (ec->tx_max_coalesced_frames == 0))
10363 /* Only copy relevant parameters, ignore all others. */
10364 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10365 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10366 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10367 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10368 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10369 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10370 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10371 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10372 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10374 if (netif_running(dev)) {
10375 tg3_full_lock(tp, 0);
10376 __tg3_set_coalesce(tp, &tp->coal);
10377 tg3_full_unlock(tp);
10382 static const struct ethtool_ops tg3_ethtool_ops = {
10383 .get_settings = tg3_get_settings,
10384 .set_settings = tg3_set_settings,
10385 .get_drvinfo = tg3_get_drvinfo,
10386 .get_regs_len = tg3_get_regs_len,
10387 .get_regs = tg3_get_regs,
10388 .get_wol = tg3_get_wol,
10389 .set_wol = tg3_set_wol,
10390 .get_msglevel = tg3_get_msglevel,
10391 .set_msglevel = tg3_set_msglevel,
10392 .nway_reset = tg3_nway_reset,
10393 .get_link = ethtool_op_get_link,
10394 .get_eeprom_len = tg3_get_eeprom_len,
10395 .get_eeprom = tg3_get_eeprom,
10396 .set_eeprom = tg3_set_eeprom,
10397 .get_ringparam = tg3_get_ringparam,
10398 .set_ringparam = tg3_set_ringparam,
10399 .get_pauseparam = tg3_get_pauseparam,
10400 .set_pauseparam = tg3_set_pauseparam,
10401 .get_rx_csum = tg3_get_rx_csum,
10402 .set_rx_csum = tg3_set_rx_csum,
10403 .set_tx_csum = tg3_set_tx_csum,
10404 .set_sg = ethtool_op_set_sg,
10405 .set_tso = tg3_set_tso,
10406 .self_test = tg3_self_test,
10407 .get_strings = tg3_get_strings,
10408 .phys_id = tg3_phys_id,
10409 .get_ethtool_stats = tg3_get_ethtool_stats,
10410 .get_coalesce = tg3_get_coalesce,
10411 .set_coalesce = tg3_set_coalesce,
10412 .get_sset_count = tg3_get_sset_count,
10415 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10417 u32 cursize, val, magic;
10419 tp->nvram_size = EEPROM_CHIP_SIZE;
10421 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10424 if ((magic != TG3_EEPROM_MAGIC) &&
10425 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10426 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10430 * Size the chip by reading offsets at increasing powers of two.
10431 * When we encounter our validation signature, we know the addressing
10432 * has wrapped around, and thus have our chip size.
10436 while (cursize < tp->nvram_size) {
10437 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10446 tp->nvram_size = cursize;
10449 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10453 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10456 /* Selfboot format */
10457 if (val != TG3_EEPROM_MAGIC) {
10458 tg3_get_eeprom_size(tp);
10462 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10464 tp->nvram_size = (val >> 16) * 1024;
10468 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10471 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10475 nvcfg1 = tr32(NVRAM_CFG1);
10476 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10477 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10480 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10481 tw32(NVRAM_CFG1, nvcfg1);
10484 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10485 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10486 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10487 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10488 tp->nvram_jedecnum = JEDEC_ATMEL;
10489 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10490 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10492 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10493 tp->nvram_jedecnum = JEDEC_ATMEL;
10494 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10496 case FLASH_VENDOR_ATMEL_EEPROM:
10497 tp->nvram_jedecnum = JEDEC_ATMEL;
10498 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10499 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10501 case FLASH_VENDOR_ST:
10502 tp->nvram_jedecnum = JEDEC_ST;
10503 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10504 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10506 case FLASH_VENDOR_SAIFUN:
10507 tp->nvram_jedecnum = JEDEC_SAIFUN;
10508 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10510 case FLASH_VENDOR_SST_SMALL:
10511 case FLASH_VENDOR_SST_LARGE:
10512 tp->nvram_jedecnum = JEDEC_SST;
10513 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10518 tp->nvram_jedecnum = JEDEC_ATMEL;
10519 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10520 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10524 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10528 nvcfg1 = tr32(NVRAM_CFG1);
10530 /* NVRAM protection for TPM */
10531 if (nvcfg1 & (1 << 27))
10532 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10534 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10535 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10536 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10537 tp->nvram_jedecnum = JEDEC_ATMEL;
10538 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10540 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10541 tp->nvram_jedecnum = JEDEC_ATMEL;
10542 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10543 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10545 case FLASH_5752VENDOR_ST_M45PE10:
10546 case FLASH_5752VENDOR_ST_M45PE20:
10547 case FLASH_5752VENDOR_ST_M45PE40:
10548 tp->nvram_jedecnum = JEDEC_ST;
10549 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10550 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10554 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10555 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10556 case FLASH_5752PAGE_SIZE_256:
10557 tp->nvram_pagesize = 256;
10559 case FLASH_5752PAGE_SIZE_512:
10560 tp->nvram_pagesize = 512;
10562 case FLASH_5752PAGE_SIZE_1K:
10563 tp->nvram_pagesize = 1024;
10565 case FLASH_5752PAGE_SIZE_2K:
10566 tp->nvram_pagesize = 2048;
10568 case FLASH_5752PAGE_SIZE_4K:
10569 tp->nvram_pagesize = 4096;
10571 case FLASH_5752PAGE_SIZE_264:
10572 tp->nvram_pagesize = 264;
10577 /* For eeprom, set pagesize to maximum eeprom size */
10578 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10580 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10581 tw32(NVRAM_CFG1, nvcfg1);
10585 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10587 u32 nvcfg1, protect = 0;
10589 nvcfg1 = tr32(NVRAM_CFG1);
10591 /* NVRAM protection for TPM */
10592 if (nvcfg1 & (1 << 27)) {
10593 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10597 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10599 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10600 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10601 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10602 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10603 tp->nvram_jedecnum = JEDEC_ATMEL;
10604 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10605 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10606 tp->nvram_pagesize = 264;
10607 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10608 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10609 tp->nvram_size = (protect ? 0x3e200 :
10610 TG3_NVRAM_SIZE_512KB);
10611 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10612 tp->nvram_size = (protect ? 0x1f200 :
10613 TG3_NVRAM_SIZE_256KB);
10615 tp->nvram_size = (protect ? 0x1f200 :
10616 TG3_NVRAM_SIZE_128KB);
10618 case FLASH_5752VENDOR_ST_M45PE10:
10619 case FLASH_5752VENDOR_ST_M45PE20:
10620 case FLASH_5752VENDOR_ST_M45PE40:
10621 tp->nvram_jedecnum = JEDEC_ST;
10622 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10623 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10624 tp->nvram_pagesize = 256;
10625 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10626 tp->nvram_size = (protect ?
10627 TG3_NVRAM_SIZE_64KB :
10628 TG3_NVRAM_SIZE_128KB);
10629 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10630 tp->nvram_size = (protect ?
10631 TG3_NVRAM_SIZE_64KB :
10632 TG3_NVRAM_SIZE_256KB);
10634 tp->nvram_size = (protect ?
10635 TG3_NVRAM_SIZE_128KB :
10636 TG3_NVRAM_SIZE_512KB);
10641 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10645 nvcfg1 = tr32(NVRAM_CFG1);
10647 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10648 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10649 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10650 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10651 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10652 tp->nvram_jedecnum = JEDEC_ATMEL;
10653 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10654 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10656 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10657 tw32(NVRAM_CFG1, nvcfg1);
10659 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10660 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10661 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10662 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10663 tp->nvram_jedecnum = JEDEC_ATMEL;
10664 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10665 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10666 tp->nvram_pagesize = 264;
10668 case FLASH_5752VENDOR_ST_M45PE10:
10669 case FLASH_5752VENDOR_ST_M45PE20:
10670 case FLASH_5752VENDOR_ST_M45PE40:
10671 tp->nvram_jedecnum = JEDEC_ST;
10672 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10673 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10674 tp->nvram_pagesize = 256;
10679 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10681 u32 nvcfg1, protect = 0;
10683 nvcfg1 = tr32(NVRAM_CFG1);
10685 /* NVRAM protection for TPM */
10686 if (nvcfg1 & (1 << 27)) {
10687 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10691 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10693 case FLASH_5761VENDOR_ATMEL_ADB021D:
10694 case FLASH_5761VENDOR_ATMEL_ADB041D:
10695 case FLASH_5761VENDOR_ATMEL_ADB081D:
10696 case FLASH_5761VENDOR_ATMEL_ADB161D:
10697 case FLASH_5761VENDOR_ATMEL_MDB021D:
10698 case FLASH_5761VENDOR_ATMEL_MDB041D:
10699 case FLASH_5761VENDOR_ATMEL_MDB081D:
10700 case FLASH_5761VENDOR_ATMEL_MDB161D:
10701 tp->nvram_jedecnum = JEDEC_ATMEL;
10702 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10703 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10704 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10705 tp->nvram_pagesize = 256;
10707 case FLASH_5761VENDOR_ST_A_M45PE20:
10708 case FLASH_5761VENDOR_ST_A_M45PE40:
10709 case FLASH_5761VENDOR_ST_A_M45PE80:
10710 case FLASH_5761VENDOR_ST_A_M45PE16:
10711 case FLASH_5761VENDOR_ST_M_M45PE20:
10712 case FLASH_5761VENDOR_ST_M_M45PE40:
10713 case FLASH_5761VENDOR_ST_M_M45PE80:
10714 case FLASH_5761VENDOR_ST_M_M45PE16:
10715 tp->nvram_jedecnum = JEDEC_ST;
10716 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10717 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10718 tp->nvram_pagesize = 256;
10723 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10726 case FLASH_5761VENDOR_ATMEL_ADB161D:
10727 case FLASH_5761VENDOR_ATMEL_MDB161D:
10728 case FLASH_5761VENDOR_ST_A_M45PE16:
10729 case FLASH_5761VENDOR_ST_M_M45PE16:
10730 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10732 case FLASH_5761VENDOR_ATMEL_ADB081D:
10733 case FLASH_5761VENDOR_ATMEL_MDB081D:
10734 case FLASH_5761VENDOR_ST_A_M45PE80:
10735 case FLASH_5761VENDOR_ST_M_M45PE80:
10736 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10738 case FLASH_5761VENDOR_ATMEL_ADB041D:
10739 case FLASH_5761VENDOR_ATMEL_MDB041D:
10740 case FLASH_5761VENDOR_ST_A_M45PE40:
10741 case FLASH_5761VENDOR_ST_M_M45PE40:
10742 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10744 case FLASH_5761VENDOR_ATMEL_ADB021D:
10745 case FLASH_5761VENDOR_ATMEL_MDB021D:
10746 case FLASH_5761VENDOR_ST_A_M45PE20:
10747 case FLASH_5761VENDOR_ST_M_M45PE20:
10748 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10754 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10756 tp->nvram_jedecnum = JEDEC_ATMEL;
10757 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10758 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10761 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10762 static void __devinit tg3_nvram_init(struct tg3 *tp)
10764 tw32_f(GRC_EEPROM_ADDR,
10765 (EEPROM_ADDR_FSM_RESET |
10766 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10767 EEPROM_ADDR_CLKPERD_SHIFT)));
10771 /* Enable seeprom accesses. */
10772 tw32_f(GRC_LOCAL_CTRL,
10773 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10776 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10777 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10778 tp->tg3_flags |= TG3_FLAG_NVRAM;
10780 if (tg3_nvram_lock(tp)) {
10781 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10782 "tg3_nvram_init failed.\n", tp->dev->name);
10785 tg3_enable_nvram_access(tp);
10787 tp->nvram_size = 0;
10789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10790 tg3_get_5752_nvram_info(tp);
10791 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10792 tg3_get_5755_nvram_info(tp);
10793 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10796 tg3_get_5787_nvram_info(tp);
10797 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10798 tg3_get_5761_nvram_info(tp);
10799 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10800 tg3_get_5906_nvram_info(tp);
10802 tg3_get_nvram_info(tp);
10804 if (tp->nvram_size == 0)
10805 tg3_get_nvram_size(tp);
10807 tg3_disable_nvram_access(tp);
10808 tg3_nvram_unlock(tp);
10811 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10813 tg3_get_eeprom_size(tp);
10817 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10818 u32 offset, u32 *val)
10823 if (offset > EEPROM_ADDR_ADDR_MASK ||
10827 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10828 EEPROM_ADDR_DEVID_MASK |
10830 tw32(GRC_EEPROM_ADDR,
10832 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10833 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10834 EEPROM_ADDR_ADDR_MASK) |
10835 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10837 for (i = 0; i < 1000; i++) {
10838 tmp = tr32(GRC_EEPROM_ADDR);
10840 if (tmp & EEPROM_ADDR_COMPLETE)
10844 if (!(tmp & EEPROM_ADDR_COMPLETE))
10847 *val = tr32(GRC_EEPROM_DATA);
10851 #define NVRAM_CMD_TIMEOUT 10000
10853 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10857 tw32(NVRAM_CMD, nvram_cmd);
10858 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10860 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10865 if (i == NVRAM_CMD_TIMEOUT) {
10871 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10873 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10874 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10875 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10876 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10877 (tp->nvram_jedecnum == JEDEC_ATMEL))
10879 addr = ((addr / tp->nvram_pagesize) <<
10880 ATMEL_AT45DB0X1B_PAGE_POS) +
10881 (addr % tp->nvram_pagesize);
10886 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10888 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10889 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10890 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10891 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10892 (tp->nvram_jedecnum == JEDEC_ATMEL))
10894 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10895 tp->nvram_pagesize) +
10896 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10901 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10905 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10906 return tg3_nvram_read_using_eeprom(tp, offset, val);
10908 offset = tg3_nvram_phys_addr(tp, offset);
10910 if (offset > NVRAM_ADDR_MSK)
10913 ret = tg3_nvram_lock(tp);
10917 tg3_enable_nvram_access(tp);
10919 tw32(NVRAM_ADDR, offset);
10920 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10921 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10924 *val = swab32(tr32(NVRAM_RDDATA));
10926 tg3_disable_nvram_access(tp);
10928 tg3_nvram_unlock(tp);
10933 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10936 int res = tg3_nvram_read(tp, offset, &v);
10938 *val = cpu_to_le32(v);
10942 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10947 err = tg3_nvram_read(tp, offset, &tmp);
10948 *val = swab32(tmp);
10952 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10953 u32 offset, u32 len, u8 *buf)
10958 for (i = 0; i < len; i += 4) {
10964 memcpy(&data, buf + i, 4);
10966 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10968 val = tr32(GRC_EEPROM_ADDR);
10969 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10971 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10973 tw32(GRC_EEPROM_ADDR, val |
10974 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10975 (addr & EEPROM_ADDR_ADDR_MASK) |
10976 EEPROM_ADDR_START |
10977 EEPROM_ADDR_WRITE);
10979 for (j = 0; j < 1000; j++) {
10980 val = tr32(GRC_EEPROM_ADDR);
10982 if (val & EEPROM_ADDR_COMPLETE)
10986 if (!(val & EEPROM_ADDR_COMPLETE)) {
10995 /* offset and length are dword aligned */
10996 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11000 u32 pagesize = tp->nvram_pagesize;
11001 u32 pagemask = pagesize - 1;
11005 tmp = kmalloc(pagesize, GFP_KERNEL);
11011 u32 phy_addr, page_off, size;
11013 phy_addr = offset & ~pagemask;
11015 for (j = 0; j < pagesize; j += 4) {
11016 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11017 (__le32 *) (tmp + j))))
11023 page_off = offset & pagemask;
11030 memcpy(tmp + page_off, buf, size);
11032 offset = offset + (pagesize - page_off);
11034 tg3_enable_nvram_access(tp);
11037 * Before we can erase the flash page, we need
11038 * to issue a special "write enable" command.
11040 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11042 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11045 /* Erase the target page */
11046 tw32(NVRAM_ADDR, phy_addr);
11048 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11049 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11051 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11054 /* Issue another write enable to start the write. */
11055 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11057 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11060 for (j = 0; j < pagesize; j += 4) {
11063 data = *((__be32 *) (tmp + j));
11064 /* swab32(le32_to_cpu(data)), actually */
11065 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11067 tw32(NVRAM_ADDR, phy_addr + j);
11069 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11073 nvram_cmd |= NVRAM_CMD_FIRST;
11074 else if (j == (pagesize - 4))
11075 nvram_cmd |= NVRAM_CMD_LAST;
11077 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11084 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11085 tg3_nvram_exec_cmd(tp, nvram_cmd);
11092 /* offset and length are dword aligned */
11093 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11098 for (i = 0; i < len; i += 4, offset += 4) {
11099 u32 page_off, phy_addr, nvram_cmd;
11102 memcpy(&data, buf + i, 4);
11103 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11105 page_off = offset % tp->nvram_pagesize;
11107 phy_addr = tg3_nvram_phys_addr(tp, offset);
11109 tw32(NVRAM_ADDR, phy_addr);
11111 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11113 if ((page_off == 0) || (i == 0))
11114 nvram_cmd |= NVRAM_CMD_FIRST;
11115 if (page_off == (tp->nvram_pagesize - 4))
11116 nvram_cmd |= NVRAM_CMD_LAST;
11118 if (i == (len - 4))
11119 nvram_cmd |= NVRAM_CMD_LAST;
11121 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11122 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11123 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11124 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11125 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11126 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11127 (tp->nvram_jedecnum == JEDEC_ST) &&
11128 (nvram_cmd & NVRAM_CMD_FIRST)) {
11130 if ((ret = tg3_nvram_exec_cmd(tp,
11131 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11136 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11137 /* We always do complete word writes to eeprom. */
11138 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11141 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11147 /* offset and length are dword aligned */
11148 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11152 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11153 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11154 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11158 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11159 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11164 ret = tg3_nvram_lock(tp);
11168 tg3_enable_nvram_access(tp);
11169 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11170 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11171 tw32(NVRAM_WRITE1, 0x406);
11173 grc_mode = tr32(GRC_MODE);
11174 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11176 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11177 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11179 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11183 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11187 grc_mode = tr32(GRC_MODE);
11188 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11190 tg3_disable_nvram_access(tp);
11191 tg3_nvram_unlock(tp);
11194 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11195 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11202 struct subsys_tbl_ent {
11203 u16 subsys_vendor, subsys_devid;
11207 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11208 /* Broadcom boards. */
11209 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11210 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11211 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11212 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11213 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11214 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11215 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11216 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11217 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11218 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11219 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11222 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11223 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11224 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11225 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11226 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11229 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11230 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11231 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11232 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11234 /* Compaq boards. */
11235 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11236 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11237 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11238 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11239 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11242 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11245 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11249 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11250 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11251 tp->pdev->subsystem_vendor) &&
11252 (subsys_id_to_phy_id[i].subsys_devid ==
11253 tp->pdev->subsystem_device))
11254 return &subsys_id_to_phy_id[i];
11259 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11264 /* On some early chips the SRAM cannot be accessed in D3hot state,
11265 * so need make sure we're in D0.
11267 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11268 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11269 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11272 /* Make sure register accesses (indirect or otherwise)
11273 * will function correctly.
11275 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11276 tp->misc_host_ctrl);
11278 /* The memory arbiter has to be enabled in order for SRAM accesses
11279 * to succeed. Normally on powerup the tg3 chip firmware will make
11280 * sure it is enabled, but other entities such as system netboot
11281 * code might disable it.
11283 val = tr32(MEMARB_MODE);
11284 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11286 tp->phy_id = PHY_ID_INVALID;
11287 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11289 /* Assume an onboard device and WOL capable by default. */
11290 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11293 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11294 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11295 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11297 val = tr32(VCPU_CFGSHDW);
11298 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11299 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11300 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11301 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11302 device_may_wakeup(&tp->pdev->dev))
11303 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11307 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11308 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11309 u32 nic_cfg, led_cfg;
11310 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11311 int eeprom_phy_serdes = 0;
11313 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11314 tp->nic_sram_data_cfg = nic_cfg;
11316 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11317 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11318 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11319 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11320 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11321 (ver > 0) && (ver < 0x100))
11322 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11325 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11327 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11328 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11329 eeprom_phy_serdes = 1;
11331 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11332 if (nic_phy_id != 0) {
11333 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11334 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11336 eeprom_phy_id = (id1 >> 16) << 10;
11337 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11338 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11342 tp->phy_id = eeprom_phy_id;
11343 if (eeprom_phy_serdes) {
11344 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11345 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11347 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11350 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11351 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11352 SHASTA_EXT_LED_MODE_MASK);
11354 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11358 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11359 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11362 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11363 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11366 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11367 tp->led_ctrl = LED_CTRL_MODE_MAC;
11369 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11370 * read on some older 5700/5701 bootcode.
11372 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11374 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11376 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11380 case SHASTA_EXT_LED_SHARED:
11381 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11382 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11383 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11384 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11385 LED_CTRL_MODE_PHY_2);
11388 case SHASTA_EXT_LED_MAC:
11389 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11392 case SHASTA_EXT_LED_COMBO:
11393 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11394 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11395 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11396 LED_CTRL_MODE_PHY_2);
11401 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11403 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11404 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11406 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11407 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11409 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11410 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11411 if ((tp->pdev->subsystem_vendor ==
11412 PCI_VENDOR_ID_ARIMA) &&
11413 (tp->pdev->subsystem_device == 0x205a ||
11414 tp->pdev->subsystem_device == 0x2063))
11415 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11417 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11418 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11421 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11422 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11423 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11424 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11426 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
11427 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11428 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11429 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11430 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11432 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11433 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
11434 device_may_wakeup(&tp->pdev->dev))
11435 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11437 if (cfg2 & (1 << 17))
11438 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11440 /* serdes signal pre-emphasis in register 0x590 set by */
11441 /* bootcode if bit 18 is set */
11442 if (cfg2 & (1 << 18))
11443 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11445 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11448 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11449 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11450 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11453 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11454 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11455 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11456 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11457 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11458 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11462 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11467 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11468 tw32(OTP_CTRL, cmd);
11470 /* Wait for up to 1 ms for command to execute. */
11471 for (i = 0; i < 100; i++) {
11472 val = tr32(OTP_STATUS);
11473 if (val & OTP_STATUS_CMD_DONE)
11478 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11481 /* Read the gphy configuration from the OTP region of the chip. The gphy
11482 * configuration is a 32-bit value that straddles the alignment boundary.
11483 * We do two 32-bit reads and then shift and merge the results.
11485 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11487 u32 bhalf_otp, thalf_otp;
11489 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11491 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11494 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11496 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11499 thalf_otp = tr32(OTP_READ_DATA);
11501 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11503 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11506 bhalf_otp = tr32(OTP_READ_DATA);
11508 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11511 static int __devinit tg3_phy_probe(struct tg3 *tp)
11513 u32 hw_phy_id_1, hw_phy_id_2;
11514 u32 hw_phy_id, hw_phy_id_masked;
11517 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11518 return tg3_phy_init(tp);
11520 /* Reading the PHY ID register can conflict with ASF
11521 * firwmare access to the PHY hardware.
11524 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11525 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11526 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11528 /* Now read the physical PHY_ID from the chip and verify
11529 * that it is sane. If it doesn't look good, we fall back
11530 * to either the hard-coded table based PHY_ID and failing
11531 * that the value found in the eeprom area.
11533 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11534 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11536 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11537 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11538 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11540 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11543 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11544 tp->phy_id = hw_phy_id;
11545 if (hw_phy_id_masked == PHY_ID_BCM8002)
11546 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11548 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11550 if (tp->phy_id != PHY_ID_INVALID) {
11551 /* Do nothing, phy ID already set up in
11552 * tg3_get_eeprom_hw_cfg().
11555 struct subsys_tbl_ent *p;
11557 /* No eeprom signature? Try the hardcoded
11558 * subsys device table.
11560 p = lookup_by_subsys(tp);
11564 tp->phy_id = p->phy_id;
11566 tp->phy_id == PHY_ID_BCM8002)
11567 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11571 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11572 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11573 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11574 u32 bmsr, adv_reg, tg3_ctrl, mask;
11576 tg3_readphy(tp, MII_BMSR, &bmsr);
11577 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11578 (bmsr & BMSR_LSTATUS))
11579 goto skip_phy_reset;
11581 err = tg3_phy_reset(tp);
11585 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11586 ADVERTISE_100HALF | ADVERTISE_100FULL |
11587 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11589 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11590 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11591 MII_TG3_CTRL_ADV_1000_FULL);
11592 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11593 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11594 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11595 MII_TG3_CTRL_ENABLE_AS_MASTER);
11598 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11599 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11600 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11601 if (!tg3_copper_is_advertising_all(tp, mask)) {
11602 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11604 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11605 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11607 tg3_writephy(tp, MII_BMCR,
11608 BMCR_ANENABLE | BMCR_ANRESTART);
11610 tg3_phy_set_wirespeed(tp);
11612 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11613 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11614 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11618 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11619 err = tg3_init_5401phy_dsp(tp);
11624 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11625 err = tg3_init_5401phy_dsp(tp);
11628 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11629 tp->link_config.advertising =
11630 (ADVERTISED_1000baseT_Half |
11631 ADVERTISED_1000baseT_Full |
11632 ADVERTISED_Autoneg |
11634 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11635 tp->link_config.advertising &=
11636 ~(ADVERTISED_1000baseT_Half |
11637 ADVERTISED_1000baseT_Full);
11642 static void __devinit tg3_read_partno(struct tg3 *tp)
11644 unsigned char vpd_data[256];
11648 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11649 goto out_not_found;
11651 if (magic == TG3_EEPROM_MAGIC) {
11652 for (i = 0; i < 256; i += 4) {
11655 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11656 goto out_not_found;
11658 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11659 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11660 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11661 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11666 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11667 for (i = 0; i < 256; i += 4) {
11672 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11674 while (j++ < 100) {
11675 pci_read_config_word(tp->pdev, vpd_cap +
11676 PCI_VPD_ADDR, &tmp16);
11677 if (tmp16 & 0x8000)
11681 if (!(tmp16 & 0x8000))
11682 goto out_not_found;
11684 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11686 v = cpu_to_le32(tmp);
11687 memcpy(&vpd_data[i], &v, 4);
11691 /* Now parse and find the part number. */
11692 for (i = 0; i < 254; ) {
11693 unsigned char val = vpd_data[i];
11694 unsigned int block_end;
11696 if (val == 0x82 || val == 0x91) {
11699 (vpd_data[i + 2] << 8)));
11704 goto out_not_found;
11706 block_end = (i + 3 +
11708 (vpd_data[i + 2] << 8)));
11711 if (block_end > 256)
11712 goto out_not_found;
11714 while (i < (block_end - 2)) {
11715 if (vpd_data[i + 0] == 'P' &&
11716 vpd_data[i + 1] == 'N') {
11717 int partno_len = vpd_data[i + 2];
11720 if (partno_len > 24 || (partno_len + i) > 256)
11721 goto out_not_found;
11723 memcpy(tp->board_part_number,
11724 &vpd_data[i], partno_len);
11729 i += 3 + vpd_data[i + 2];
11732 /* Part number not found. */
11733 goto out_not_found;
11737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11738 strcpy(tp->board_part_number, "BCM95906");
11740 strcpy(tp->board_part_number, "none");
11743 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11747 if (tg3_nvram_read_swab(tp, offset, &val) ||
11748 (val & 0xfc000000) != 0x0c000000 ||
11749 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11756 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11758 u32 val, offset, start;
11762 if (tg3_nvram_read_swab(tp, 0, &val))
11765 if (val != TG3_EEPROM_MAGIC)
11768 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11769 tg3_nvram_read_swab(tp, 0x4, &start))
11772 offset = tg3_nvram_logical_addr(tp, offset);
11774 if (!tg3_fw_img_is_valid(tp, offset) ||
11775 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11778 offset = offset + ver_offset - start;
11779 for (i = 0; i < 16; i += 4) {
11781 if (tg3_nvram_read_le(tp, offset + i, &v))
11784 memcpy(tp->fw_ver + i, &v, 4);
11787 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11788 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11791 for (offset = TG3_NVM_DIR_START;
11792 offset < TG3_NVM_DIR_END;
11793 offset += TG3_NVM_DIRENT_SIZE) {
11794 if (tg3_nvram_read_swab(tp, offset, &val))
11797 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11801 if (offset == TG3_NVM_DIR_END)
11804 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11805 start = 0x08000000;
11806 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11809 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11810 !tg3_fw_img_is_valid(tp, offset) ||
11811 tg3_nvram_read_swab(tp, offset + 8, &val))
11814 offset += val - start;
11816 bcnt = strlen(tp->fw_ver);
11818 tp->fw_ver[bcnt++] = ',';
11819 tp->fw_ver[bcnt++] = ' ';
11821 for (i = 0; i < 4; i++) {
11823 if (tg3_nvram_read_le(tp, offset, &v))
11826 offset += sizeof(v);
11828 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11829 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11833 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11837 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11840 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11842 static int __devinit tg3_get_invariants(struct tg3 *tp)
11844 static struct pci_device_id write_reorder_chipsets[] = {
11845 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11846 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11847 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11848 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11849 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11850 PCI_DEVICE_ID_VIA_8385_0) },
11854 u32 cacheline_sz_reg;
11855 u32 pci_state_reg, grc_misc_cfg;
11860 /* Force memory write invalidate off. If we leave it on,
11861 * then on 5700_BX chips we have to enable a workaround.
11862 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11863 * to match the cacheline size. The Broadcom driver have this
11864 * workaround but turns MWI off all the times so never uses
11865 * it. This seems to suggest that the workaround is insufficient.
11867 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11868 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11869 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11871 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11872 * has the register indirect write enable bit set before
11873 * we try to access any of the MMIO registers. It is also
11874 * critical that the PCI-X hw workaround situation is decided
11875 * before that as well.
11877 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11880 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11881 MISC_HOST_CTRL_CHIPREV_SHIFT);
11882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11883 u32 prod_id_asic_rev;
11885 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11886 &prod_id_asic_rev);
11887 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11890 /* Wrong chip ID in 5752 A0. This code can be removed later
11891 * as A0 is not in production.
11893 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11894 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11896 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11897 * we need to disable memory and use config. cycles
11898 * only to access all registers. The 5702/03 chips
11899 * can mistakenly decode the special cycles from the
11900 * ICH chipsets as memory write cycles, causing corruption
11901 * of register and memory space. Only certain ICH bridges
11902 * will drive special cycles with non-zero data during the
11903 * address phase which can fall within the 5703's address
11904 * range. This is not an ICH bug as the PCI spec allows
11905 * non-zero address during special cycles. However, only
11906 * these ICH bridges are known to drive non-zero addresses
11907 * during special cycles.
11909 * Since special cycles do not cross PCI bridges, we only
11910 * enable this workaround if the 5703 is on the secondary
11911 * bus of these ICH bridges.
11913 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11914 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11915 static struct tg3_dev_id {
11919 } ich_chipsets[] = {
11920 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11922 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11924 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11926 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11930 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11931 struct pci_dev *bridge = NULL;
11933 while (pci_id->vendor != 0) {
11934 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11940 if (pci_id->rev != PCI_ANY_ID) {
11941 if (bridge->revision > pci_id->rev)
11944 if (bridge->subordinate &&
11945 (bridge->subordinate->number ==
11946 tp->pdev->bus->number)) {
11948 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11949 pci_dev_put(bridge);
11955 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11956 static struct tg3_dev_id {
11959 } bridge_chipsets[] = {
11960 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11961 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11964 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11965 struct pci_dev *bridge = NULL;
11967 while (pci_id->vendor != 0) {
11968 bridge = pci_get_device(pci_id->vendor,
11975 if (bridge->subordinate &&
11976 (bridge->subordinate->number <=
11977 tp->pdev->bus->number) &&
11978 (bridge->subordinate->subordinate >=
11979 tp->pdev->bus->number)) {
11980 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11981 pci_dev_put(bridge);
11987 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11988 * DMA addresses > 40-bit. This bridge may have other additional
11989 * 57xx devices behind it in some 4-port NIC designs for example.
11990 * Any tg3 device found behind the bridge will also need the 40-bit
11993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11995 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11996 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11997 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12000 struct pci_dev *bridge = NULL;
12003 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12004 PCI_DEVICE_ID_SERVERWORKS_EPB,
12006 if (bridge && bridge->subordinate &&
12007 (bridge->subordinate->number <=
12008 tp->pdev->bus->number) &&
12009 (bridge->subordinate->subordinate >=
12010 tp->pdev->bus->number)) {
12011 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12012 pci_dev_put(bridge);
12018 /* Initialize misc host control in PCI block. */
12019 tp->misc_host_ctrl |= (misc_ctrl_reg &
12020 MISC_HOST_CTRL_CHIPREV);
12021 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12022 tp->misc_host_ctrl);
12024 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12025 &cacheline_sz_reg);
12027 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12028 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12029 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12030 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12032 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12033 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12034 tp->pdev_peer = tg3_find_peer(tp);
12036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12041 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12042 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12044 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12045 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12047 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12048 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12049 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12051 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12052 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12053 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12054 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12055 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12056 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12057 tp->pdev_peer == tp->pdev))
12058 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12065 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12066 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12067 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12069 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12070 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12072 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12073 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12077 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12078 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12079 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12081 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12082 if (pcie_cap != 0) {
12083 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12085 pcie_set_readrq(tp->pdev, 4096);
12087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12090 pci_read_config_word(tp->pdev,
12091 pcie_cap + PCI_EXP_LNKCTL,
12093 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12094 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12098 /* If we have an AMD 762 or VIA K8T800 chipset, write
12099 * reordering to the mailbox registers done by the host
12100 * controller can cause major troubles. We read back from
12101 * every mailbox register write to force the writes to be
12102 * posted to the chip in order.
12104 if (pci_dev_present(write_reorder_chipsets) &&
12105 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12106 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12109 tp->pci_lat_timer < 64) {
12110 tp->pci_lat_timer = 64;
12112 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12113 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12114 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12115 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12117 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12121 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12122 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12123 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12124 if (!tp->pcix_cap) {
12125 printk(KERN_ERR PFX "Cannot find PCI-X "
12126 "capability, aborting.\n");
12131 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12134 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12135 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12137 /* If this is a 5700 BX chipset, and we are in PCI-X
12138 * mode, enable register write workaround.
12140 * The workaround is to use indirect register accesses
12141 * for all chip writes not to mailbox registers.
12143 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12146 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12148 /* The chip can have it's power management PCI config
12149 * space registers clobbered due to this bug.
12150 * So explicitly force the chip into D0 here.
12152 pci_read_config_dword(tp->pdev,
12153 tp->pm_cap + PCI_PM_CTRL,
12155 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12156 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12157 pci_write_config_dword(tp->pdev,
12158 tp->pm_cap + PCI_PM_CTRL,
12161 /* Also, force SERR#/PERR# in PCI command. */
12162 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12163 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12164 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12168 /* 5700 BX chips need to have their TX producer index mailboxes
12169 * written twice to workaround a bug.
12171 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12172 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12174 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12175 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12176 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12177 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12179 /* Chip-specific fixup from Broadcom driver */
12180 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12181 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12182 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12183 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12186 /* Default fast path register access methods */
12187 tp->read32 = tg3_read32;
12188 tp->write32 = tg3_write32;
12189 tp->read32_mbox = tg3_read32;
12190 tp->write32_mbox = tg3_write32;
12191 tp->write32_tx_mbox = tg3_write32;
12192 tp->write32_rx_mbox = tg3_write32;
12194 /* Various workaround register access methods */
12195 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12196 tp->write32 = tg3_write_indirect_reg32;
12197 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12198 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12199 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12201 * Back to back register writes can cause problems on these
12202 * chips, the workaround is to read back all reg writes
12203 * except those to mailbox regs.
12205 * See tg3_write_indirect_reg32().
12207 tp->write32 = tg3_write_flush_reg32;
12211 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12212 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12213 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12214 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12215 tp->write32_rx_mbox = tg3_write_flush_reg32;
12218 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12219 tp->read32 = tg3_read_indirect_reg32;
12220 tp->write32 = tg3_write_indirect_reg32;
12221 tp->read32_mbox = tg3_read_indirect_mbox;
12222 tp->write32_mbox = tg3_write_indirect_mbox;
12223 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12224 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12229 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12230 pci_cmd &= ~PCI_COMMAND_MEMORY;
12231 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12234 tp->read32_mbox = tg3_read32_mbox_5906;
12235 tp->write32_mbox = tg3_write32_mbox_5906;
12236 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12237 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12240 if (tp->write32 == tg3_write_indirect_reg32 ||
12241 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12242 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12243 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12244 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12246 /* Get eeprom hw config before calling tg3_set_power_state().
12247 * In particular, the TG3_FLG2_IS_NIC flag must be
12248 * determined before calling tg3_set_power_state() so that
12249 * we know whether or not to switch out of Vaux power.
12250 * When the flag is set, it means that GPIO1 is used for eeprom
12251 * write protect and also implies that it is a LOM where GPIOs
12252 * are not used to switch power.
12254 tg3_get_eeprom_hw_cfg(tp);
12256 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12257 /* Allow reads and writes to the
12258 * APE register and memory space.
12260 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12261 PCISTATE_ALLOW_APE_SHMEM_WR;
12262 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12266 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12269 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12271 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12272 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12273 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12274 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12275 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12278 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12279 * GPIO1 driven high will bring 5700's external PHY out of reset.
12280 * It is also used as eeprom write protect on LOMs.
12282 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12283 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12284 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12285 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12286 GRC_LCLCTRL_GPIO_OUTPUT1);
12287 /* Unused GPIO3 must be driven as output on 5752 because there
12288 * are no pull-up resistors on unused GPIO pins.
12290 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12291 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12293 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12294 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12296 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12297 /* Turn off the debug UART. */
12298 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12299 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12300 /* Keep VMain power. */
12301 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12302 GRC_LCLCTRL_GPIO_OUTPUT0;
12305 /* Force the chip into D0. */
12306 err = tg3_set_power_state(tp, PCI_D0);
12308 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12309 pci_name(tp->pdev));
12313 /* 5700 B0 chips do not support checksumming correctly due
12314 * to hardware bugs.
12316 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12317 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12319 /* Derive initial jumbo mode from MTU assigned in
12320 * ether_setup() via the alloc_etherdev() call
12322 if (tp->dev->mtu > ETH_DATA_LEN &&
12323 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12324 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12326 /* Determine WakeOnLan speed to use. */
12327 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12328 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12329 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12330 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12331 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12333 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12336 /* A few boards don't want Ethernet@WireSpeed phy feature */
12337 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12338 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12339 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12340 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12341 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12342 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12343 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12345 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12346 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12347 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12348 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12349 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12351 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12353 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12354 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12356 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12357 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12358 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12359 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12360 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12361 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12362 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12363 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12366 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12367 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12368 tp->phy_otp = tg3_read_otp_phycfg(tp);
12369 if (tp->phy_otp == 0)
12370 tp->phy_otp = TG3_OTP_DEFAULT;
12373 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12374 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12376 tp->mi_mode = MAC_MI_MODE_BASE;
12378 tp->coalesce_mode = 0;
12379 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12380 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12381 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12384 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12386 err = tg3_mdio_init(tp);
12390 /* Initialize data/descriptor byte/word swapping. */
12391 val = tr32(GRC_MODE);
12392 val &= GRC_MODE_HOST_STACKUP;
12393 tw32(GRC_MODE, val | tp->grc_mode);
12395 tg3_switch_clocks(tp);
12397 /* Clear this out for sanity. */
12398 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12400 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12402 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12403 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12404 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12406 if (chiprevid == CHIPREV_ID_5701_A0 ||
12407 chiprevid == CHIPREV_ID_5701_B0 ||
12408 chiprevid == CHIPREV_ID_5701_B2 ||
12409 chiprevid == CHIPREV_ID_5701_B5) {
12410 void __iomem *sram_base;
12412 /* Write some dummy words into the SRAM status block
12413 * area, see if it reads back correctly. If the return
12414 * value is bad, force enable the PCIX workaround.
12416 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12418 writel(0x00000000, sram_base);
12419 writel(0x00000000, sram_base + 4);
12420 writel(0xffffffff, sram_base + 4);
12421 if (readl(sram_base) != 0x00000000)
12422 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12427 tg3_nvram_init(tp);
12429 grc_misc_cfg = tr32(GRC_MISC_CFG);
12430 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12433 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12434 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12435 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12437 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12438 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12439 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12440 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12441 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12442 HOSTCC_MODE_CLRTICK_TXBD);
12444 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12445 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12446 tp->misc_host_ctrl);
12449 /* Preserve the APE MAC_MODE bits */
12450 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12451 tp->mac_mode = tr32(MAC_MODE) |
12452 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12454 tp->mac_mode = TG3_DEF_MAC_MODE;
12456 /* these are limited to 10/100 only */
12457 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12458 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12459 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12460 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12461 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12462 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12463 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12464 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12465 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12466 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12467 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12468 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12469 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12471 err = tg3_phy_probe(tp);
12473 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12474 pci_name(tp->pdev), err);
12475 /* ... but do not return immediately ... */
12479 tg3_read_partno(tp);
12480 tg3_read_fw_ver(tp);
12482 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12483 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12486 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12488 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12491 /* 5700 {AX,BX} chips have a broken status block link
12492 * change bit implementation, so we must use the
12493 * status register in those cases.
12495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12496 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12498 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12500 /* The led_ctrl is set during tg3_phy_probe, here we might
12501 * have to force the link status polling mechanism based
12502 * upon subsystem IDs.
12504 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12505 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12506 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12507 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12508 TG3_FLAG_USE_LINKCHG_REG);
12511 /* For all SERDES we poll the MAC status register. */
12512 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12513 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12515 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12517 /* All chips before 5787 can get confused if TX buffers
12518 * straddle the 4GB address boundary in some cases.
12520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12526 tp->dev->hard_start_xmit = tg3_start_xmit;
12528 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12532 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12535 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12537 /* Increment the rx prod index on the rx std ring by at most
12538 * 8 for these chips to workaround hw errata.
12540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12541 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12543 tp->rx_std_max_post = 8;
12545 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12546 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12547 PCIE_PWR_MGMT_L1_THRESH_MSK;
12552 #ifdef CONFIG_SPARC
12553 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12555 struct net_device *dev = tp->dev;
12556 struct pci_dev *pdev = tp->pdev;
12557 struct device_node *dp = pci_device_to_OF_node(pdev);
12558 const unsigned char *addr;
12561 addr = of_get_property(dp, "local-mac-address", &len);
12562 if (addr && len == 6) {
12563 memcpy(dev->dev_addr, addr, 6);
12564 memcpy(dev->perm_addr, dev->dev_addr, 6);
12570 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12572 struct net_device *dev = tp->dev;
12574 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12575 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12580 static int __devinit tg3_get_device_address(struct tg3 *tp)
12582 struct net_device *dev = tp->dev;
12583 u32 hi, lo, mac_offset;
12586 #ifdef CONFIG_SPARC
12587 if (!tg3_get_macaddr_sparc(tp))
12592 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12593 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12594 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12596 if (tg3_nvram_lock(tp))
12597 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12599 tg3_nvram_unlock(tp);
12601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12604 /* First try to get it from MAC address mailbox. */
12605 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12606 if ((hi >> 16) == 0x484b) {
12607 dev->dev_addr[0] = (hi >> 8) & 0xff;
12608 dev->dev_addr[1] = (hi >> 0) & 0xff;
12610 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12611 dev->dev_addr[2] = (lo >> 24) & 0xff;
12612 dev->dev_addr[3] = (lo >> 16) & 0xff;
12613 dev->dev_addr[4] = (lo >> 8) & 0xff;
12614 dev->dev_addr[5] = (lo >> 0) & 0xff;
12616 /* Some old bootcode may report a 0 MAC address in SRAM */
12617 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12620 /* Next, try NVRAM. */
12621 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12622 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12623 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12624 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12625 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12626 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12627 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12628 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12630 /* Finally just fetch it out of the MAC control regs. */
12632 hi = tr32(MAC_ADDR_0_HIGH);
12633 lo = tr32(MAC_ADDR_0_LOW);
12635 dev->dev_addr[5] = lo & 0xff;
12636 dev->dev_addr[4] = (lo >> 8) & 0xff;
12637 dev->dev_addr[3] = (lo >> 16) & 0xff;
12638 dev->dev_addr[2] = (lo >> 24) & 0xff;
12639 dev->dev_addr[1] = hi & 0xff;
12640 dev->dev_addr[0] = (hi >> 8) & 0xff;
12644 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12645 #ifdef CONFIG_SPARC
12646 if (!tg3_get_default_macaddr_sparc(tp))
12651 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12655 #define BOUNDARY_SINGLE_CACHELINE 1
12656 #define BOUNDARY_MULTI_CACHELINE 2
12658 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12660 int cacheline_size;
12664 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12666 cacheline_size = 1024;
12668 cacheline_size = (int) byte * 4;
12670 /* On 5703 and later chips, the boundary bits have no
12673 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12674 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12675 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12678 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12679 goal = BOUNDARY_MULTI_CACHELINE;
12681 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12682 goal = BOUNDARY_SINGLE_CACHELINE;
12691 /* PCI controllers on most RISC systems tend to disconnect
12692 * when a device tries to burst across a cache-line boundary.
12693 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12695 * Unfortunately, for PCI-E there are only limited
12696 * write-side controls for this, and thus for reads
12697 * we will still get the disconnects. We'll also waste
12698 * these PCI cycles for both read and write for chips
12699 * other than 5700 and 5701 which do not implement the
12702 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12703 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12704 switch (cacheline_size) {
12709 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12710 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12711 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12713 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12714 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12719 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12720 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12724 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12725 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12728 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12729 switch (cacheline_size) {
12733 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12734 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12735 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12741 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12742 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12746 switch (cacheline_size) {
12748 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12749 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12750 DMA_RWCTRL_WRITE_BNDRY_16);
12755 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12756 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12757 DMA_RWCTRL_WRITE_BNDRY_32);
12762 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12763 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12764 DMA_RWCTRL_WRITE_BNDRY_64);
12769 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12770 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12771 DMA_RWCTRL_WRITE_BNDRY_128);
12776 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12777 DMA_RWCTRL_WRITE_BNDRY_256);
12780 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12781 DMA_RWCTRL_WRITE_BNDRY_512);
12785 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12786 DMA_RWCTRL_WRITE_BNDRY_1024);
12795 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12797 struct tg3_internal_buffer_desc test_desc;
12798 u32 sram_dma_descs;
12801 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12803 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12804 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12805 tw32(RDMAC_STATUS, 0);
12806 tw32(WDMAC_STATUS, 0);
12808 tw32(BUFMGR_MODE, 0);
12809 tw32(FTQ_RESET, 0);
12811 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12812 test_desc.addr_lo = buf_dma & 0xffffffff;
12813 test_desc.nic_mbuf = 0x00002100;
12814 test_desc.len = size;
12817 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12818 * the *second* time the tg3 driver was getting loaded after an
12821 * Broadcom tells me:
12822 * ...the DMA engine is connected to the GRC block and a DMA
12823 * reset may affect the GRC block in some unpredictable way...
12824 * The behavior of resets to individual blocks has not been tested.
12826 * Broadcom noted the GRC reset will also reset all sub-components.
12829 test_desc.cqid_sqid = (13 << 8) | 2;
12831 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12834 test_desc.cqid_sqid = (16 << 8) | 7;
12836 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12839 test_desc.flags = 0x00000005;
12841 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12844 val = *(((u32 *)&test_desc) + i);
12845 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12846 sram_dma_descs + (i * sizeof(u32)));
12847 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12849 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12852 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12854 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12858 for (i = 0; i < 40; i++) {
12862 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12864 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12865 if ((val & 0xffff) == sram_dma_descs) {
12876 #define TEST_BUFFER_SIZE 0x2000
12878 static int __devinit tg3_test_dma(struct tg3 *tp)
12880 dma_addr_t buf_dma;
12881 u32 *buf, saved_dma_rwctrl;
12884 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12890 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12891 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12893 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12895 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12896 /* DMA read watermark not used on PCIE */
12897 tp->dma_rwctrl |= 0x00180000;
12898 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12901 tp->dma_rwctrl |= 0x003f0000;
12903 tp->dma_rwctrl |= 0x003f000f;
12905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12907 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12908 u32 read_water = 0x7;
12910 /* If the 5704 is behind the EPB bridge, we can
12911 * do the less restrictive ONE_DMA workaround for
12912 * better performance.
12914 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12915 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12916 tp->dma_rwctrl |= 0x8000;
12917 else if (ccval == 0x6 || ccval == 0x7)
12918 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12922 /* Set bit 23 to enable PCIX hw bug fix */
12924 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12925 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12927 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12928 /* 5780 always in PCIX mode */
12929 tp->dma_rwctrl |= 0x00144000;
12930 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12931 /* 5714 always in PCIX mode */
12932 tp->dma_rwctrl |= 0x00148000;
12934 tp->dma_rwctrl |= 0x001b000f;
12938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12940 tp->dma_rwctrl &= 0xfffffff0;
12942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12944 /* Remove this if it causes problems for some boards. */
12945 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12947 /* On 5700/5701 chips, we need to set this bit.
12948 * Otherwise the chip will issue cacheline transactions
12949 * to streamable DMA memory with not all the byte
12950 * enables turned on. This is an error on several
12951 * RISC PCI controllers, in particular sparc64.
12953 * On 5703/5704 chips, this bit has been reassigned
12954 * a different meaning. In particular, it is used
12955 * on those chips to enable a PCI-X workaround.
12957 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12960 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12963 /* Unneeded, already done by tg3_get_invariants. */
12964 tg3_switch_clocks(tp);
12968 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12969 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12972 /* It is best to perform DMA test with maximum write burst size
12973 * to expose the 5700/5701 write DMA bug.
12975 saved_dma_rwctrl = tp->dma_rwctrl;
12976 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12977 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12982 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12985 /* Send the buffer to the chip. */
12986 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12988 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12993 /* validate data reached card RAM correctly. */
12994 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12996 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12997 if (le32_to_cpu(val) != p[i]) {
12998 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12999 /* ret = -ENODEV here? */
13004 /* Now read it back. */
13005 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13007 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13013 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13017 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13018 DMA_RWCTRL_WRITE_BNDRY_16) {
13019 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13020 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13021 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13024 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13030 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13036 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13037 DMA_RWCTRL_WRITE_BNDRY_16) {
13038 static struct pci_device_id dma_wait_state_chipsets[] = {
13039 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13040 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13044 /* DMA test passed without adjusting DMA boundary,
13045 * now look for chipsets that are known to expose the
13046 * DMA bug without failing the test.
13048 if (pci_dev_present(dma_wait_state_chipsets)) {
13049 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13050 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13053 /* Safe to use the calculated DMA boundary. */
13054 tp->dma_rwctrl = saved_dma_rwctrl;
13056 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13060 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13065 static void __devinit tg3_init_link_config(struct tg3 *tp)
13067 tp->link_config.advertising =
13068 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13069 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13070 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13071 ADVERTISED_Autoneg | ADVERTISED_MII);
13072 tp->link_config.speed = SPEED_INVALID;
13073 tp->link_config.duplex = DUPLEX_INVALID;
13074 tp->link_config.autoneg = AUTONEG_ENABLE;
13075 tp->link_config.active_speed = SPEED_INVALID;
13076 tp->link_config.active_duplex = DUPLEX_INVALID;
13077 tp->link_config.phy_is_low_power = 0;
13078 tp->link_config.orig_speed = SPEED_INVALID;
13079 tp->link_config.orig_duplex = DUPLEX_INVALID;
13080 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13083 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13085 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13086 tp->bufmgr_config.mbuf_read_dma_low_water =
13087 DEFAULT_MB_RDMA_LOW_WATER_5705;
13088 tp->bufmgr_config.mbuf_mac_rx_low_water =
13089 DEFAULT_MB_MACRX_LOW_WATER_5705;
13090 tp->bufmgr_config.mbuf_high_water =
13091 DEFAULT_MB_HIGH_WATER_5705;
13092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13093 tp->bufmgr_config.mbuf_mac_rx_low_water =
13094 DEFAULT_MB_MACRX_LOW_WATER_5906;
13095 tp->bufmgr_config.mbuf_high_water =
13096 DEFAULT_MB_HIGH_WATER_5906;
13099 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13100 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13101 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13102 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13103 tp->bufmgr_config.mbuf_high_water_jumbo =
13104 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13106 tp->bufmgr_config.mbuf_read_dma_low_water =
13107 DEFAULT_MB_RDMA_LOW_WATER;
13108 tp->bufmgr_config.mbuf_mac_rx_low_water =
13109 DEFAULT_MB_MACRX_LOW_WATER;
13110 tp->bufmgr_config.mbuf_high_water =
13111 DEFAULT_MB_HIGH_WATER;
13113 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13114 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13115 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13116 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13117 tp->bufmgr_config.mbuf_high_water_jumbo =
13118 DEFAULT_MB_HIGH_WATER_JUMBO;
13121 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13122 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13125 static char * __devinit tg3_phy_string(struct tg3 *tp)
13127 switch (tp->phy_id & PHY_ID_MASK) {
13128 case PHY_ID_BCM5400: return "5400";
13129 case PHY_ID_BCM5401: return "5401";
13130 case PHY_ID_BCM5411: return "5411";
13131 case PHY_ID_BCM5701: return "5701";
13132 case PHY_ID_BCM5703: return "5703";
13133 case PHY_ID_BCM5704: return "5704";
13134 case PHY_ID_BCM5705: return "5705";
13135 case PHY_ID_BCM5750: return "5750";
13136 case PHY_ID_BCM5752: return "5752";
13137 case PHY_ID_BCM5714: return "5714";
13138 case PHY_ID_BCM5780: return "5780";
13139 case PHY_ID_BCM5755: return "5755";
13140 case PHY_ID_BCM5787: return "5787";
13141 case PHY_ID_BCM5784: return "5784";
13142 case PHY_ID_BCM5756: return "5722/5756";
13143 case PHY_ID_BCM5906: return "5906";
13144 case PHY_ID_BCM5761: return "5761";
13145 case PHY_ID_BCM8002: return "8002/serdes";
13146 case 0: return "serdes";
13147 default: return "unknown";
13151 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13153 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13154 strcpy(str, "PCI Express");
13156 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13157 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13159 strcpy(str, "PCIX:");
13161 if ((clock_ctrl == 7) ||
13162 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13163 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13164 strcat(str, "133MHz");
13165 else if (clock_ctrl == 0)
13166 strcat(str, "33MHz");
13167 else if (clock_ctrl == 2)
13168 strcat(str, "50MHz");
13169 else if (clock_ctrl == 4)
13170 strcat(str, "66MHz");
13171 else if (clock_ctrl == 6)
13172 strcat(str, "100MHz");
13174 strcpy(str, "PCI:");
13175 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13176 strcat(str, "66MHz");
13178 strcat(str, "33MHz");
13180 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13181 strcat(str, ":32-bit");
13183 strcat(str, ":64-bit");
13187 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13189 struct pci_dev *peer;
13190 unsigned int func, devnr = tp->pdev->devfn & ~7;
13192 for (func = 0; func < 8; func++) {
13193 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13194 if (peer && peer != tp->pdev)
13198 /* 5704 can be configured in single-port mode, set peer to
13199 * tp->pdev in that case.
13207 * We don't need to keep the refcount elevated; there's no way
13208 * to remove one half of this device without removing the other
13215 static void __devinit tg3_init_coal(struct tg3 *tp)
13217 struct ethtool_coalesce *ec = &tp->coal;
13219 memset(ec, 0, sizeof(*ec));
13220 ec->cmd = ETHTOOL_GCOALESCE;
13221 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13222 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13223 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13224 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13225 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13226 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13227 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13228 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13229 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13231 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13232 HOSTCC_MODE_CLRTICK_TXBD)) {
13233 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13234 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13235 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13236 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13239 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13240 ec->rx_coalesce_usecs_irq = 0;
13241 ec->tx_coalesce_usecs_irq = 0;
13242 ec->stats_block_coalesce_usecs = 0;
13246 static int __devinit tg3_init_one(struct pci_dev *pdev,
13247 const struct pci_device_id *ent)
13249 static int tg3_version_printed = 0;
13250 resource_size_t tg3reg_base;
13251 unsigned long tg3reg_len;
13252 struct net_device *dev;
13256 u64 dma_mask, persist_dma_mask;
13258 if (tg3_version_printed++ == 0)
13259 printk(KERN_INFO "%s", version);
13261 err = pci_enable_device(pdev);
13263 printk(KERN_ERR PFX "Cannot enable PCI device, "
13268 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13269 printk(KERN_ERR PFX "Cannot find proper PCI device "
13270 "base address, aborting.\n");
13272 goto err_out_disable_pdev;
13275 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13277 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13279 goto err_out_disable_pdev;
13282 pci_set_master(pdev);
13284 /* Find power-management capability. */
13285 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13287 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13290 goto err_out_free_res;
13293 tg3reg_base = pci_resource_start(pdev, 0);
13294 tg3reg_len = pci_resource_len(pdev, 0);
13296 dev = alloc_etherdev(sizeof(*tp));
13298 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13300 goto err_out_free_res;
13303 SET_NETDEV_DEV(dev, &pdev->dev);
13305 #if TG3_VLAN_TAG_USED
13306 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13307 dev->vlan_rx_register = tg3_vlan_rx_register;
13310 tp = netdev_priv(dev);
13313 tp->pm_cap = pm_cap;
13314 tp->rx_mode = TG3_DEF_RX_MODE;
13315 tp->tx_mode = TG3_DEF_TX_MODE;
13318 tp->msg_enable = tg3_debug;
13320 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13322 /* The word/byte swap controls here control register access byte
13323 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13326 tp->misc_host_ctrl =
13327 MISC_HOST_CTRL_MASK_PCI_INT |
13328 MISC_HOST_CTRL_WORD_SWAP |
13329 MISC_HOST_CTRL_INDIR_ACCESS |
13330 MISC_HOST_CTRL_PCISTATE_RW;
13332 /* The NONFRM (non-frame) byte/word swap controls take effect
13333 * on descriptor entries, anything which isn't packet data.
13335 * The StrongARM chips on the board (one for tx, one for rx)
13336 * are running in big-endian mode.
13338 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13339 GRC_MODE_WSWAP_NONFRM_DATA);
13340 #ifdef __BIG_ENDIAN
13341 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13343 spin_lock_init(&tp->lock);
13344 spin_lock_init(&tp->indirect_lock);
13345 INIT_WORK(&tp->reset_task, tg3_reset_task);
13347 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
13349 printk(KERN_ERR PFX "Cannot map device registers, "
13352 goto err_out_free_dev;
13355 tg3_init_link_config(tp);
13357 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13358 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13359 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13361 dev->open = tg3_open;
13362 dev->stop = tg3_close;
13363 dev->get_stats = tg3_get_stats;
13364 dev->set_multicast_list = tg3_set_rx_mode;
13365 dev->set_mac_address = tg3_set_mac_addr;
13366 dev->do_ioctl = tg3_ioctl;
13367 dev->tx_timeout = tg3_tx_timeout;
13368 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13369 dev->ethtool_ops = &tg3_ethtool_ops;
13370 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13371 dev->change_mtu = tg3_change_mtu;
13372 dev->irq = pdev->irq;
13373 #ifdef CONFIG_NET_POLL_CONTROLLER
13374 dev->poll_controller = tg3_poll_controller;
13377 err = tg3_get_invariants(tp);
13379 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13381 goto err_out_iounmap;
13384 /* The EPB bridge inside 5714, 5715, and 5780 and any
13385 * device behind the EPB cannot support DMA addresses > 40-bit.
13386 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13387 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13388 * do DMA address check in tg3_start_xmit().
13390 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13391 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13392 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13393 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13394 #ifdef CONFIG_HIGHMEM
13395 dma_mask = DMA_64BIT_MASK;
13398 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13400 /* Configure DMA attributes. */
13401 if (dma_mask > DMA_32BIT_MASK) {
13402 err = pci_set_dma_mask(pdev, dma_mask);
13404 dev->features |= NETIF_F_HIGHDMA;
13405 err = pci_set_consistent_dma_mask(pdev,
13408 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13409 "DMA for consistent allocations\n");
13410 goto err_out_iounmap;
13414 if (err || dma_mask == DMA_32BIT_MASK) {
13415 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13417 printk(KERN_ERR PFX "No usable DMA configuration, "
13419 goto err_out_iounmap;
13423 tg3_init_bufmgr_config(tp);
13425 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13426 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13428 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13430 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13431 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13432 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13433 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13435 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13438 /* TSO is on by default on chips that support hardware TSO.
13439 * Firmware TSO on older chips gives lower performance, so it
13440 * is off by default, but can be enabled using ethtool.
13442 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13443 dev->features |= NETIF_F_TSO;
13444 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13445 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13446 dev->features |= NETIF_F_TSO6;
13447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13448 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13449 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13451 dev->features |= NETIF_F_TSO_ECN;
13455 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13456 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13457 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13458 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13459 tp->rx_pending = 63;
13462 err = tg3_get_device_address(tp);
13464 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13466 goto err_out_iounmap;
13469 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13470 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13471 printk(KERN_ERR PFX "Cannot find proper PCI device "
13472 "base address for APE, aborting.\n");
13474 goto err_out_iounmap;
13477 tg3reg_base = pci_resource_start(pdev, 2);
13478 tg3reg_len = pci_resource_len(pdev, 2);
13480 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
13481 if (!tp->aperegs) {
13482 printk(KERN_ERR PFX "Cannot map APE registers, "
13485 goto err_out_iounmap;
13488 tg3_ape_lock_init(tp);
13492 * Reset chip in case UNDI or EFI driver did not shutdown
13493 * DMA self test will enable WDMAC and we'll see (spurious)
13494 * pending DMA on the PCI bus at that point.
13496 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13497 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13498 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13499 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13502 err = tg3_test_dma(tp);
13504 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13505 goto err_out_apeunmap;
13508 /* Tigon3 can do ipv4 only... and some chips have buggy
13511 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13512 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13518 dev->features |= NETIF_F_IPV6_CSUM;
13520 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13522 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13524 /* flow control autonegotiation is default behavior */
13525 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13526 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13530 pci_set_drvdata(pdev, dev);
13532 err = register_netdev(dev);
13534 printk(KERN_ERR PFX "Cannot register net device, "
13536 goto err_out_apeunmap;
13539 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
13540 "(%s) %s Ethernet %pM\n",
13542 tp->board_part_number,
13543 tp->pci_chip_rev_id,
13544 tg3_phy_string(tp),
13545 tg3_bus_string(tp, str),
13546 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13547 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13548 "10/100/1000Base-T")),
13551 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
13552 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
13554 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13555 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13556 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13557 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13558 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13559 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13560 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13561 dev->name, tp->dma_rwctrl,
13562 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13563 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13569 iounmap(tp->aperegs);
13570 tp->aperegs = NULL;
13583 pci_release_regions(pdev);
13585 err_out_disable_pdev:
13586 pci_disable_device(pdev);
13587 pci_set_drvdata(pdev, NULL);
13591 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13593 struct net_device *dev = pci_get_drvdata(pdev);
13596 struct tg3 *tp = netdev_priv(dev);
13598 flush_scheduled_work();
13600 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13605 unregister_netdev(dev);
13607 iounmap(tp->aperegs);
13608 tp->aperegs = NULL;
13615 pci_release_regions(pdev);
13616 pci_disable_device(pdev);
13617 pci_set_drvdata(pdev, NULL);
13621 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13623 struct net_device *dev = pci_get_drvdata(pdev);
13624 struct tg3 *tp = netdev_priv(dev);
13625 pci_power_t target_state;
13628 /* PCI register 4 needs to be saved whether netif_running() or not.
13629 * MSI address and data need to be saved if using MSI and
13632 pci_save_state(pdev);
13634 if (!netif_running(dev))
13637 flush_scheduled_work();
13639 tg3_netif_stop(tp);
13641 del_timer_sync(&tp->timer);
13643 tg3_full_lock(tp, 1);
13644 tg3_disable_ints(tp);
13645 tg3_full_unlock(tp);
13647 netif_device_detach(dev);
13649 tg3_full_lock(tp, 0);
13650 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13651 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13652 tg3_full_unlock(tp);
13654 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13656 err = tg3_set_power_state(tp, target_state);
13660 tg3_full_lock(tp, 0);
13662 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13663 err2 = tg3_restart_hw(tp, 1);
13667 tp->timer.expires = jiffies + tp->timer_offset;
13668 add_timer(&tp->timer);
13670 netif_device_attach(dev);
13671 tg3_netif_start(tp);
13674 tg3_full_unlock(tp);
13683 static int tg3_resume(struct pci_dev *pdev)
13685 struct net_device *dev = pci_get_drvdata(pdev);
13686 struct tg3 *tp = netdev_priv(dev);
13689 pci_restore_state(tp->pdev);
13691 if (!netif_running(dev))
13694 err = tg3_set_power_state(tp, PCI_D0);
13698 netif_device_attach(dev);
13700 tg3_full_lock(tp, 0);
13702 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13703 err = tg3_restart_hw(tp, 1);
13707 tp->timer.expires = jiffies + tp->timer_offset;
13708 add_timer(&tp->timer);
13710 tg3_netif_start(tp);
13713 tg3_full_unlock(tp);
13721 static struct pci_driver tg3_driver = {
13722 .name = DRV_MODULE_NAME,
13723 .id_table = tg3_pci_tbl,
13724 .probe = tg3_init_one,
13725 .remove = __devexit_p(tg3_remove_one),
13726 .suspend = tg3_suspend,
13727 .resume = tg3_resume
13730 static int __init tg3_init(void)
13732 return pci_register_driver(&tg3_driver);
13735 static void __exit tg3_cleanup(void)
13737 pci_unregister_driver(&tg3_driver);
13740 module_init(tg3_init);
13741 module_exit(tg3_cleanup);