/*
 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
 *
 * Copyright (C) 2003, 2007 IC Plus Corp
 *
 * Sundance Technology, Inc.
 * craig_rich@sundanceti.com
 *
 * http://www.icplus.com.tw
 * sorbica@icplus.com.tw
 *
 * http://www.icplus.com.tw
 */
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/mutex.h>

#include <asm/div64.h>
#define IPG_RX_RING_BYTES	(sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
#define IPG_TX_RING_BYTES	(sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
#define IPG_RESET_MASK \
	(IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
	 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
	 IPG_AC_AUTO_INIT)
#define ipg_w32(val32, reg)	iowrite32((val32), ioaddr + (reg))
#define ipg_w16(val16, reg)	iowrite16((val16), ioaddr + (reg))
#define ipg_w8(val8, reg)	iowrite8((val8), ioaddr + (reg))

#define ipg_r32(reg)		ioread32(ioaddr + (reg))
#define ipg_r16(reg)		ioread16(ioaddr + (reg))
#define ipg_r8(reg)		ioread8(ioaddr + (reg))
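
/* Note: the ipg_rNN()/ipg_wNN() accessors above expect a local variable
 * named 'ioaddr' to be in scope; each function below first derives it
 * from the private data, via ipg_ioaddr() or sp->ioaddr.
 */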
#define JUMBO_FRAME_4k_ONLY

#include "ipg.h"

#define DRV_NAME	"ipg"

MODULE_AUTHOR("IC Plus Corp. 2003");
MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
MODULE_LICENSE("GPL");
/* Variable record -- indexed by leading revision/length.
 * Revision/Length(=N*4), Address1, Data1, Address2, Data2, ..., AddressN, DataN
 */
static unsigned short DefaultPhyParam[] = {
	/* 11/12/03 IP1000A v1-3 rev=0x40 */
	/*--------------------------------------------------------------------------
	(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
	27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
	31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
	--------------------------------------------------------------------------*/
	/* 12/17/03 IP1000A v1-4 rev=0x40 */
	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
	0x0000,
	30, 0x005e, 9, 0x0700,
	/* 01/09/04 IP1000A v1-5 rev=0x41 */
	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
	0x0000,
	30, 0x005e, 9, 0x0700,
	0x0000
};
static const char *ipg_brand_name[] = {
	"IC PLUS IP1000 1000/100/10 based NIC",
	"Sundance Technology ST2021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
	"D-Link NIC",
	"D-Link NIC IP1000A"
};
static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
	{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
	{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
	{ PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
	{ PCI_VDEVICE(DLINK, 0x9021), 3 },
	{ PCI_VDEVICE(DLINK, 0x4000), 4 },
	{ PCI_VDEVICE(DLINK, 0x4020), 5 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
static inline void __iomem *ipg_ioaddr(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);

	return sp->ioaddr;
}
static void ipg_dump_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	u32 offset;

	IPG_DEBUG_MSG("_dump_rfdlist\n");

	printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current);
	printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty);
	printk(KERN_INFO "RFDList start address = %16.16lx\n",
	       (unsigned long) sp->rxd_map);
	printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n",
	       ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].next_desc);
		offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].rfs);
		offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].frag_info);
	}
}
static void ipg_dump_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	u32 offset;

	IPG_DEBUG_MSG("_dump_tfdlist\n");

	printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current);
	printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty);
	printk(KERN_INFO "TFDList start address = %16.16lx\n",
	       (unsigned long) sp->txd_map);
	printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n",
	       ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].next_desc);

		offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].tfc);
		offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].frag_info);
	}
}
static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
{
	ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);

	ndelay(IPG_PC_PHYCTRLWAIT_NS);
}
static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
{
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
}
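
/* One call to ipg_drive_phy_ctl_low_high() produces one full MgmtClk
 * cycle: MgmtData is set up while MgmtClk is low and sampled by the PHY
 * on the rising edge, per the IEEE 802.3 clause 22 management interface.
 */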
static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;

	ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
}
static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
		phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
}
static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	u16 bit_data;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);

	bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);

	return bit_data;
}
/*
 * Read a register from the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	/*
	 * The GMII management frame structure for a read is as follows:
	 *
	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
	 *
	 * <32 1s> = 32 consecutive logic 1 values
	 * A = bit of Physical Layer device address (MSB first)
	 * R = bit of register address (MSB first)
	 * z = High impedance state
	 * D = bit of read data (MSB first)
	 *
	 * Transmission order is 'Preamble' field first, bits transmitted
	 * left to right (first to last).
	 */
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE,	32 },	/* Preamble */
		{ GMII_ST,		2 },	/* ST */
		{ GMII_READ,		2 },	/* OP */
		{ phy_id,		5 },	/* PHYAD */
		{ phy_reg,		5 },	/* REGAD */
		{ 0x0000,		2 },	/* TA */
		{ 0x0000,		16 },	/* DATA */
		{ 0x0000,		1 }	/* IDLE */
	};
	unsigned int i, j;
	u8 polarity, data;

	polarity = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Create the Preamble, ST, OP, PHYAD, and REGAD fields. */
	for (j = 0; j < 5; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* For each variable length field, the MSB must be
			 * transmitted first. Rotate through the field bits,
			 * starting with the MSB, and move each bit into
			 * the 1st (2^1) bit position (this is the bit position
			 * corresponding to the MgmtData bit of the PhyCtrl
			 * register for the IPG).
			 *
			 * Example: ST = 01;
			 *
			 * First write a '0' to bit 1 of the PhyCtrl
			 * register, then write a '1' to bit 1 of the
			 * PhyCtrl register.
			 *
			 * To do this, right shift the MSB of ST by the value:
			 * [field length - 1 - #ST bits already written]
			 * then left shift this result by 1.
			 */
			data = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	send_three_state(ioaddr, polarity);

	read_phy_bit(ioaddr, polarity);

	/*
	 * For a read cycle, the bits for the next two fields (TA and
	 * DATA) are driven by the PHY (the IPG reads these bits).
	 */
	for (i = 0; i < p[6].len; i++) {
		p[6].field |=
		    (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
	}

	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_end(ioaddr, polarity);

	/* Return the value of the DATA field. */
	return p[6].field;
}
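
/* Illustrative example (not in the original driver): the 32-bit PHY
 * identifier could be fetched with two reads over this interface,
 *
 *	id = (mdio_read(dev, phy_id, MII_PHYSID1) << 16) |
 *	      mdio_read(dev, phy_id, MII_PHYSID2);
 *
 * MII_PHYSID1/MII_PHYSID2 come from <linux/mii.h>, included above.
 */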
/*
 * Write to a register of the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	/*
	 * The GMII management frame structure for a write is as follows:
	 *
	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
	 * |< 32 1s>|01|01|AAAAA|RRRRR|10|DDDDDDDDDDDDDDDD|z   |
	 *
	 * <32 1s> = 32 consecutive logic 1 values
	 * A = bit of Physical Layer device address (MSB first)
	 * R = bit of register address (MSB first)
	 * z = High impedance state
	 * D = bit of write data (MSB first)
	 *
	 * Transmission order is 'Preamble' field first, bits transmitted
	 * left to right (first to last).
	 */
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE,	32 },	/* Preamble */
		{ GMII_ST,		2 },	/* ST */
		{ GMII_WRITE,		2 },	/* OP */
		{ phy_id,		5 },	/* PHYAD */
		{ phy_reg,		5 },	/* REGAD */
		{ 0x0002,		2 },	/* TA */
		{ val & 0xffff,		16 },	/* DATA */
		{ 0x0000,		1 }	/* IDLE */
	};
	unsigned int i, j;
	u8 polarity, data;

	polarity = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Create the Preamble, ST, OP, PHYAD, REGAD, TA, and DATA fields. */
	for (j = 0; j < 7; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* For each variable length field, the MSB must be
			 * transmitted first. Rotate through the field bits,
			 * starting with the MSB, and move each bit into
			 * the 1st (2^1) bit position (this is the bit position
			 * corresponding to the MgmtData bit of the PhyCtrl
			 * register for the IPG).
			 *
			 * Example: ST = 01;
			 *
			 * First write a '0' to bit 1 of the PhyCtrl
			 * register, then write a '1' to bit 1 of the
			 * PhyCtrl register.
			 *
			 * To do this, right shift the MSB of ST by the value:
			 * [field length - 1 - #ST bits already written]
			 * then left shift this result by 1.
			 */
			data = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	/* The last cycle is a tri-state, so read from the PHY. */
	for (j = 7; j < 8; j++) {
		for (i = 0; i < p[j].len; i++) {
			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);

			p[j].field |= ((ipg_r8(PHY_CTRL) &
				IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);

			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
		}
	}
}
static void ipg_set_led_mode(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	u32 mode;

	mode = ipg_r32(ASIC_CTRL);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if ((sp->led_mode & 0x03) > 1)
		mode |= IPG_AC_LED_MODE_BIT_1;	/* Write Asic Control Bit 29 */

	if ((sp->led_mode & 0x01) == 1)
		mode |= IPG_AC_LED_MODE;	/* Write Asic Control Bit 14 */

	if ((sp->led_mode & 0x08) == 8)
		mode |= IPG_AC_LED_SPEED;	/* Write Asic Control Bit 27 */

	ipg_w32(mode, ASIC_CTRL);
}
static void ipg_set_phy_set(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	int physet;

	physet = ipg_r8(PHY_SET);
	physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
	physet |= ((sp->led_mode & 0x70) >> 4);
	ipg_w8(physet, PHY_SET);
}
static int ipg_reset(struct net_device *dev, u32 resetflags)
{
	/* Assert functional resets via the IPG AsicCtrl
	 * register as specified by the 'resetflags' input
	 * parameter.
	 */
	void __iomem *ioaddr = ipg_ioaddr(dev);
	unsigned int timeout_count = 0;

	IPG_DEBUG_MSG("_reset\n");

	ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);

	/* Delay added to account for problem with 10Mbps reset. */
	mdelay(IPG_AC_RESETWAIT);

	while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
		mdelay(IPG_AC_RESETWAIT);
		if (++timeout_count > IPG_AC_RESET_TIMEOUT)
			return -ETIME;
	}
	/* Set LED Mode in Asic Control */
	ipg_set_led_mode(dev);

	/* Set PHYSet Register Value */
	ipg_set_phy_set(dev);

	return 0;
}
/* Find the GMII PHY address. */
static int ipg_find_phyaddr(struct net_device *dev)
{
	unsigned int phyaddr, i;

	for (i = 0; i < 32; i++) {
		u32 status;

		/* Search for the correct PHY address among 32 possible. */
		phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;

		/* 10/22/03 Grace change verify from GMII_PHY_STATUS to
		 * GMII_PHY_ID (OUI is zero for a non-existent PHY).
		 */
		status = mdio_read(dev, phyaddr, MII_BMSR);

		if ((status != 0xFFFF) && (status != 0))
			return phyaddr;
	}

	return 0x1f;
}
/*
 * Configure IPG based on result of IEEE 802.3 PHY
 * auto-negotiation.
 */
static int ipg_config_autoneg(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int txflowcontrol;
	unsigned int rxflowcontrol;
	unsigned int fullduplex;
	u32 mac_ctrl_val;
	u32 asicctrl;
	u8 phyctrl;

	IPG_DEBUG_MSG("_config_autoneg\n");

	asicctrl = ipg_r32(ASIC_CTRL);
	phyctrl = ipg_r8(PHY_CTRL);
	mac_ctrl_val = ipg_r32(MAC_CTRL);

	/* Set flags for use in resolving auto-negotiation, assuming
	 * non-1000Mbps, half duplex, no flow control.
	 */
	fullduplex = 0;
	txflowcontrol = 0;
	rxflowcontrol = 0;

	/* To accommodate a problem in 10Mbps operation,
	 * set a global flag if PHY running in 10Mbps mode.
	 */
	sp->tenmbpsmode = 0;

	printk(KERN_INFO "%s: Link speed = ", dev->name);

	/* Determine actual speed of operation. */
	switch (phyctrl & IPG_PC_LINK_SPEED) {
	case IPG_PC_LINK_SPEED_10MBPS:
		printk("10Mbps.\n");
		printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",
		       dev->name);
		sp->tenmbpsmode = 1;
		break;
	case IPG_PC_LINK_SPEED_100MBPS:
		printk("100Mbps.\n");
		break;
	case IPG_PC_LINK_SPEED_1000MBPS:
		printk("1000Mbps.\n");
		break;
	default:
		printk("undefined!\n");
		return 0;
	}

	if (phyctrl & IPG_PC_DUPLEX_STATUS) {
		fullduplex = 1;
		txflowcontrol = 1;
		rxflowcontrol = 1;
	}

	/* Configure full duplex, and flow control. */
	if (fullduplex == 1) {
		/* Configure IPG for full duplex operation. */
		printk(KERN_INFO "%s: setting full duplex, ", dev->name);

		mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;

		if (txflowcontrol == 1) {
			printk("TX flow control");
			mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
		} else {
			printk("no TX flow control");
			mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
		}

		if (rxflowcontrol == 1) {
			printk(", RX flow control.");
			mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
		} else {
			printk(", no RX flow control.");
			mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
		}

		printk("\n");
	} else {
		/* Configure IPG for half duplex operation. */
		printk(KERN_INFO "%s: setting half duplex, "
		       "no TX flow control, no RX flow control.\n", dev->name);

		mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD &
			~IPG_MC_TX_FLOW_CONTROL_ENABLE &
			~IPG_MC_RX_FLOW_CONTROL_ENABLE;
	}
	ipg_w32(mac_ctrl_val, MAC_CTRL);

	return 0;
}
/* Determine and configure multicast operation and set
 * receive mode for IPG.
 */
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	struct dev_mc_list *mc_list_ptr;
	unsigned int hashindex;
	u32 hashtable[2];
	u8 receivemode;

	IPG_DEBUG_MSG("_nic_set_multicast_list\n");

	receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;

	if (dev->flags & IFF_PROMISC) {
		/* NIC to be configured in promiscuous mode. */
		receivemode = IPG_RM_RECEIVEALLFRAMES;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   ((dev->flags & IFF_MULTICAST) &&
		    (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
		/* NIC to be configured to receive all multicast
		 * frames. */
		receivemode |= IPG_RM_RECEIVEMULTICAST;
	} else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
		/* NIC to be configured to receive selected
		 * multicast addresses. */
		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
	}

	/* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
	 * The IPG applies a cyclic-redundancy-check (the same CRC
	 * used to calculate the frame data FCS) to the destination
	 * address of all incoming multicast frames whose destination
	 * address has the multicast bit set. The least significant
	 * 6 bits of the CRC result are used as an addressing index
	 * into the hash table. If the value of the bit addressed by
	 * this index is a 1, the frame is passed to the host system.
	 */

	/* Clear hashtable. */
	hashtable[0] = 0x00000000;
	hashtable[1] = 0x00000000;

	/* Cycle through all multicast addresses to filter. */
	for (mc_list_ptr = dev->mc_list;
	     mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) {
		/* Calculate CRC result for each multicast address. */
		hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
				     ETH_ALEN);

		/* Use only the least significant 6 bits. */
		hashindex = hashindex & 0x3F;
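
		/* The 6-bit index selects one bit of the 64-bit table:
		 * indices 0-31 land in hashtable[0] and 32-63 in
		 * hashtable[1], matching the HASHTABLE_0/HASHTABLE_1
		 * registers written below.
		 */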
		/* Within "hashtable", set bit number "hashindex"
		 * to a logic 1.
		 */
		set_bit(hashindex, (void *)hashtable);
	}

	/* Write the value of the hashtable to the four 16-bit
	 * HASHTABLE IPG registers, as two 32-bit writes.
	 */
	ipg_w32(hashtable[0], HASHTABLE_0);
	ipg_w32(hashtable[1], HASHTABLE_1);

	ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);

	IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
}
static int ipg_io_config(struct net_device *dev)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	u32 origmacctrl;
	u32 restoremacctrl;

	IPG_DEBUG_MSG("_io_config\n");

	origmacctrl = ipg_r32(MAC_CTRL);

	restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;

	/* Based on compilation option, determine if FCS is to be
	 * stripped on receive frames by IPG.
	 */
	if (!IPG_STRIP_FCS_ON_RX)
		restoremacctrl |= IPG_MC_RCV_FCS;

	/* Determine if transmitter and/or receiver are
	 * enabled so we may restore MACCTRL correctly.
	 */
	if (origmacctrl & IPG_MC_TX_ENABLED)
		restoremacctrl |= IPG_MC_TX_ENABLE;

	if (origmacctrl & IPG_MC_RX_ENABLED)
		restoremacctrl |= IPG_MC_RX_ENABLE;

	/* Transmitter and receiver must be disabled before setting
	 * IFSSelect.
	 */
	ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Now that transmitter and receiver are disabled, write
	 * to IFSSelect.
	 */
	ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Set RECEIVEMODE register. */
	ipg_nic_set_multicast_list(dev);

	ipg_w16(IPG_MAX_RXFRAME_SIZE, MAX_FRAME_SIZE);

	ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
	ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
	ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
	ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
	ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
	ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
	ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
		 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
		 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
		 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
	ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
	ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);

	/* IPG multi-frag frame bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);

	/* IPG TX poll now bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);

	/* IPG RX poll now bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);

	/* Now restore MACCTRL to original setting. */
	ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);

	/* Disable unused RMON statistics. */
	ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);

	/* Disable unused MIB statistics. */
	ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
		IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
		IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
		IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
		IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
		IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);

	return 0;
}
/*
 * Create a receive buffer within system memory and update
 * NIC private structure appropriately.
 */
static int ipg_get_rxbuff(struct net_device *dev, int entry)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + entry;
	struct sk_buff *skb;
	u64 rxfragsize;

	IPG_DEBUG_MSG("_get_rxbuff\n");

	skb = netdev_alloc_skb(dev, IPG_RXSUPPORT_SIZE + NET_IP_ALIGN);
	if (!skb) {
		sp->rx_buff[entry] = NULL;
		return -ENOMEM;
	}

	/* Adjust the data start location within the buffer to
	 * align IP address field to a 16 byte boundary.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	/* Associate the receive buffer with the IPG NIC. */
	skb->dev = dev;

	/* Save the address of the sk_buff structure. */
	sp->rx_buff[entry] = skb;

	rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		sp->rx_buf_sz, PCI_DMA_FROMDEVICE));

	/* Set the RFD fragment length. */
	rxfragsize = IPG_RXFRAG_SIZE;
	rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);

	return 0;
}
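
/* Layout note: an RFD's frag_info packs the receive buffer's bus address
 * into its low 48 bits (the device is given at most a 40-bit DMA mask at
 * probe time) and the fragment length into bits 48-63, the
 * IPG_RFI_FRAGLEN field.
 */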
static int init_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_rfdlist\n");

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		struct ipg_rx *rxfd = sp->rxd + i;

		if (sp->rx_buff[i]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
		}

		/* Clear out the RFS field. */
		rxfd->rfs = 0x0000000000000000;

		if (ipg_get_rxbuff(dev, i) < 0) {
			/*
			 * A receive buffer was not ready, break the
			 * RFD list here.
			 */
			IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");

			/* Just in case we cannot allocate a single RFD.
			 * Should not occur.
			 */
			if (i == 0) {
				printk(KERN_ERR "%s: No memory available"
				       " for RFD list.\n", dev->name);
				return -ENOMEM;
			}
		}

		rxfd->next_desc = cpu_to_le64(sp->rxd_map +
			sizeof(struct ipg_rx)*(i + 1));
	}
	sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);

	sp->rx_current = 0;
	sp->rx_dirty = 0;

	/* Write the location of the RFDList to the IPG. */
	ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
	ipg_w32(0x00000000, RFD_LIST_PTR_1);

	return 0;
}
static void init_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_tfdlist\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		struct ipg_tx *txfd = sp->txd + i;

		txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

		if (sp->tx_buff[i]) {
			dev_kfree_skb_irq(sp->tx_buff[i]);
			sp->tx_buff[i] = NULL;
		}

		txfd->next_desc = cpu_to_le64(sp->txd_map +
			sizeof(struct ipg_tx)*(i + 1));
	}
	sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);

	sp->tx_current = 0;
	sp->tx_dirty = 0;

	/* Write the location of the TFDList to the IPG. */
	IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
		       (u32) sp->txd_map);
	ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
	ipg_w32(0x00000000, TFD_LIST_PTR_1);

	sp->reset_current_tfd = 1;
}
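
/* Setting reset_current_tfd makes the next ipg_nic_hard_start_xmit()
 * restart transmission at entry 0 of the freshly initialized TFD list.
 */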
/*
 * Free all transmit buffers which have already been transferred
 * via DMA to the IPG.
 */
static void ipg_nic_txfree(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int released, pending, dirty;

	IPG_DEBUG_MSG("_nic_txfree\n");

	pending = sp->tx_current - sp->tx_dirty;
	dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;

	for (released = 0; released < pending; released++) {
		struct sk_buff *skb = sp->tx_buff[dirty];
		struct ipg_tx *txfd = sp->txd + dirty;

		IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);

		/* Look at each TFD's TFC field beginning
		 * at the last freed TFD up to the current TFD.
		 * If the TFDDone bit is set, free the associated
		 * buffer.
		 */
		if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
			break;

		/* Free the transmit buffer. */
		if (skb) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				skb->len, PCI_DMA_TODEVICE);

			dev_kfree_skb_irq(skb);

			sp->tx_buff[dirty] = NULL;
		}
		dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
	}

	sp->tx_dirty += released;

	if (netif_queue_stopped(dev) &&
	    (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
		netif_wake_queue(dev);
	}
}
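
/* The wake-up test above is the inverse of the ring-full test in
 * ipg_nic_hard_start_xmit(), which stops the queue once tx_current
 * reaches tx_dirty + IPG_TFDLIST_LENGTH.
 */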
static void ipg_tx_timeout(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
		  IPG_AC_FIFO);

	spin_lock_irq(&sp->lock);

	/* Re-configure after DMA reset. */
	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Error during re-configuration.\n",
		       dev->name);
	}

	init_tfdlist(dev);

	spin_unlock_irq(&sp->lock);

	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
		MAC_CTRL);
}
/*
 * For TxComplete interrupts, free all transmit
 * buffers which have already been transferred via DMA
 * to the IPG.
 */
static void ipg_nic_txcleanup(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_txcleanup\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		/* Reading the TXSTATUS register clears the
		 * TX_COMPLETE interrupt.
		 */
		u32 txstatusdword = ipg_r32(TX_STATUS);

		IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword);

		/* Check for Transmit errors. Error bits only valid if
		 * TX_COMPLETE bit in the TXSTATUS register is a 1.
		 */
		if (!(txstatusdword & IPG_TS_TX_COMPLETE))
			break;

		/* If in 10Mbps mode, indicate transmit is ready. */
		if (sp->tenmbpsmode) {
			netif_wake_queue(dev);
		}

		/* Transmit error, increment stat counters. */
		if (txstatusdword & IPG_TS_TX_ERROR) {
			IPG_DEBUG_MSG("Transmit error.\n");
			sp->stats.tx_errors++;
		}

		/* Late collision, re-enable transmitter. */
		if (txstatusdword & IPG_TS_LATE_COLLISION) {
			IPG_DEBUG_MSG("Late collision on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Maximum collisions, re-enable transmitter. */
		if (txstatusdword & IPG_TS_TX_MAX_COLL) {
			IPG_DEBUG_MSG("Maximum collisions on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Transmit underrun, reset and re-enable
		 * transmitter.
		 */
		if (txstatusdword & IPG_TS_TX_UNDERRUN) {
			IPG_DEBUG_MSG("Transmitter underrun.\n");
			sp->stats.tx_fifo_errors++;
			ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
				  IPG_AC_NETWORK | IPG_AC_FIFO);

			/* Re-configure after DMA reset. */
			if (ipg_io_config(dev) < 0) {
				printk(KERN_INFO
				       "%s: Error during re-configuration.\n",
				       dev->name);
			}
			init_tfdlist(dev);

			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}
	}

	ipg_nic_txfree(dev);
}
/* Provides statistical information about the IPG NIC. */
static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	u16 temp1;
	u16 temp2;

	IPG_DEBUG_MSG("_nic_get_stats\n");

	/* Check to see if the NIC has been initialized via nic_open,
	 * before trying to read statistic registers.
	 */
	if (!test_bit(__LINK_STATE_START, &dev->state))
		return &sp->stats;

	sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
	sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
	sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
	sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
	temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
	sp->stats.rx_errors += temp1;
	sp->stats.rx_missed_errors += temp1;
	temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
		ipg_r32(IPG_LATECOLLISIONS);
	temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
	sp->stats.collisions += temp1;
	sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
	sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
		ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
	sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);

	/* detailed tx_errors */
	sp->stats.tx_carrier_errors += temp2;

	/* detailed rx_errors */
	sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
		ipg_r16(IPG_FRAMETOOLONGERRRORS);
	sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);

	/* Unutilized IPG statistic registers. */
	ipg_r32(IPG_MCSTFRAMESRCVDOK);

	return &sp->stats;
}
/* Restore used receive buffers. */
static int ipg_nic_rxrestore(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	const unsigned int curr = sp->rx_current;
	unsigned int dirty = sp->rx_dirty;

	IPG_DEBUG_MSG("_nic_rxrestore\n");

	for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
		unsigned int entry = dirty % IPG_RFDLIST_LENGTH;

		/* rx_copybreak may poke hole here and there. */
		if (sp->rx_buff[entry])
			continue;

		/* Generate a new receive buffer to replace the
		 * current buffer (which will be released by the
		 * Linux system).
		 */
		if (ipg_get_rxbuff(dev, entry) < 0) {
			IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n");
			break;
		}

		/* Reset the RFS field. */
		sp->rxd[entry].rfs = 0x0000000000000000;
	}
	sp->rx_dirty = dirty;

	return 0;
}
#ifdef JUMBO_FRAME

/* Jumbo frame status is controlled by jumboindex and jumbosize.
 * The initial state is jumboindex = -1 and jumbosize = 0.
 * 1. jumboindex = -1 and jumbosize = 0 : the previous jumbo frame has
 *    been completed.
 * 2. jumboindex != -1 and jumbosize != 0 : a jumbo frame is being
 *    received and is not over-sized.
 * 3. jumboindex = -1 and jumbosize != 0 : the jumbo frame is over-sized;
 *    what was received so far has been dropped, and the remainder of
 *    the current frame must also be dropped as it arrives.
 */

enum {
	NORMAL_PACKET,
	ERROR_PACKET
};

enum {
	FRAME_NO_START_NO_END	= 0,
	FRAME_WITH_START	= 1,
	FRAME_WITH_END		= 10,
	FRAME_WITH_START_WITH_END = 11
};
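
/* The decimal encoding lets ipg_nic_rx_check_frame_type() simply add 1
 * for a FrameStart bit and 10 for a FrameEnd bit, keeping the four cases
 * distinct (0, 1, 10, 11) for the switch in ipg_nic_rx().
 */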
inline void ipg_nic_rx_free_skb(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;

	if (sp->rx_buff[entry]) {
		struct ipg_rx *rxfd = sp->rxd + entry;

		pci_unmap_single(sp->pdev,
			le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
			sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_irq(sp->rx_buff[entry]);
		sp->rx_buff[entry] = NULL;
	}
}
inline int ipg_nic_rx_check_frame_type(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
	int type = FRAME_NO_START_NO_END;

	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
		type += FRAME_WITH_START;
	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
		type += FRAME_WITH_END;
	return type;
}
inline int ipg_nic_rx_check_error(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
	struct ipg_rx *rxfd = sp->rxd + entry;

	if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
	     (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
	      IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
	      IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
		IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
			      (unsigned long) rxfd->rfs);

		/* Increment general receive error statistic. */
		sp->stats.rx_errors++;

		/* Increment detailed receive error statistics. */
		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
			IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");

			sp->stats.rx_fifo_errors++;
		}

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
			IPG_DEBUG_MSG("RX runt occurred.\n");
			sp->stats.rx_length_errors++;
		}

		/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
		 * error count handled by an IPG statistic register.
		 */

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
			IPG_DEBUG_MSG("RX alignment error occurred.\n");
			sp->stats.rx_frame_errors++;
		}

		/* Do nothing for IPG_RFS_RXFCSERROR, error count
		 * handled by an IPG statistic register.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			dev_kfree_skb_irq(sp->rx_buff[entry]);
			sp->rx_buff[entry] = NULL;
		}
		return ERROR_PACKET;
	}
	return NORMAL_PACKET;
}
static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
					  struct ipg_nic_private *sp,
					  struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;
	struct sk_buff *skb;
	int framelen;

	if (jumbo->found_start) {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}

	/* 1: found error, 0: no error */
	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
		return;

	skb = sp->rx_buff[entry];
	if (!skb)
		return;

	/* Accept this frame and send it to the upper layer. */
	framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
	if (framelen > IPG_RXFRAG_SIZE)
		framelen = IPG_RXFRAG_SIZE;

	skb_put(skb, framelen);
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_rx(skb);

	dev->last_rx = jiffies;
	sp->rx_buff[entry] = NULL;
}
static void ipg_nic_rx_with_start(struct net_device *dev,
				  struct ipg_nic_private *sp,
				  struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;
	struct pci_dev *pdev = sp->pdev;
	struct sk_buff *skb;

	/* 1: found error, 0: no error */
	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
		return;

	/* Accept this frame and keep it for reassembly. */
	skb = sp->rx_buff[entry];
	if (!skb)
		return;

	if (jumbo->found_start)
		dev_kfree_skb_irq(jumbo->skb);

	pci_unmap_single(pdev,
		le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
		sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

	skb_put(skb, IPG_RXFRAG_SIZE);

	jumbo->found_start = 1;
	jumbo->current_size = IPG_RXFRAG_SIZE;
	jumbo->skb = skb;

	sp->rx_buff[entry] = NULL;
	dev->last_rx = jiffies;
}
static void ipg_nic_rx_with_end(struct net_device *dev,
				struct ipg_nic_private *sp,
				struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	/* 1: found error, 0: no error */
	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (!skb)
			return;

		if (jumbo->found_start) {
			int framelen, endframelen;

			framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

			endframelen = framelen - jumbo->current_size;

			if (framelen > IPG_RXFRAG_SIZE)
				framelen = IPG_RXFRAG_SIZE;

			if (framelen > IPG_RXSUPPORT_SIZE)
				dev_kfree_skb_irq(jumbo->skb);
			else {
				memcpy(skb_put(jumbo->skb, endframelen),
				       skb->data, endframelen);

				jumbo->skb->protocol =
				    eth_type_trans(jumbo->skb, dev);

				jumbo->skb->ip_summed = CHECKSUM_NONE;
				netif_rx(jumbo->skb);
			}
		}

		dev->last_rx = jiffies;
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;

		ipg_nic_rx_free_skb(dev);
	} else {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}
}
static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
				       struct ipg_nic_private *sp,
				       struct ipg_rx *rxfd, unsigned entry)
{
	struct ipg_jumbo *jumbo = &sp->jumbo;

	/* 1: found error, 0: no error */
	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
		struct sk_buff *skb = sp->rx_buff[entry];

		if (skb) {
			if (jumbo->found_start) {
				jumbo->current_size += IPG_RXFRAG_SIZE;
				if (jumbo->current_size <= IPG_RXSUPPORT_SIZE) {
					memcpy(skb_put(jumbo->skb,
						       IPG_RXFRAG_SIZE),
					       skb->data, IPG_RXFRAG_SIZE);
				}
			}
			dev->last_rx = jiffies;
			ipg_nic_rx_free_skb(dev);
		}
	} else {
		dev_kfree_skb_irq(jumbo->skb);
		jumbo->found_start = 0;
		jumbo->current_size = 0;
		jumbo->skb = NULL;
	}
}
static int ipg_nic_rx(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx\n");

	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct ipg_rx *rxfd = sp->rxd + entry;

		if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
			break;

		switch (ipg_nic_rx_check_frame_type(dev)) {
		case FRAME_WITH_START_WITH_END:
			ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
			break;
		case FRAME_WITH_START:
			ipg_nic_rx_with_start(dev, sp, rxfd, entry);
			break;
		case FRAME_WITH_END:
			ipg_nic_rx_with_end(dev, sp, rxfd, entry);
			break;
		case FRAME_NO_START_NO_END:
			ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
			break;
		}
	}

	sp->rx_current = curr;

	if (i == IPG_MAXRFDPROCESS_COUNT) {
		/* There are more RFDs to process, however the
		 * allocated amount of RFD processing time has
		 * expired. Assert Interrupt Requested to make
		 * sure we come back to process the remaining RFDs.
		 */
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
	}

	ipg_nic_rxrestore(dev);

	return 0;
}
#else
static int ipg_nic_rx(struct net_device *dev)
{
	/* Transfer received Ethernet frames to higher network layers. */
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	struct ipg_rx *rxfd;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx\n");

#define __RFS_MASK \
	cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
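
/* A frame is accepted below only if RFDDone, FrameStart and FrameEnd are
 * all set, i.e. the frame fits within a single RFD. Frames spanning
 * multiple RFDs are discarded by the clean-up loop at the end of this
 * function.
 */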
	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct sk_buff *skb = sp->rx_buff[entry];
		unsigned int framelen;

		rxfd = sp->rxd + entry;

		if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
			break;

		/* Get received frame length. */
		framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

		/* Check for jumbo frame arrival with too small
		 * RXFRAG_SIZE.
		 */
		if (framelen > IPG_RXFRAG_SIZE) {
			IPG_DEBUG_MSG
			    ("RFS FrameLen > allocated fragment size.\n");

			framelen = IPG_RXFRAG_SIZE;
		}

		if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
		     (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
		      IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
		      IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {

			IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
				      (unsigned long int) rxfd->rfs);

			/* Increment general receive error statistic. */
			sp->stats.rx_errors++;

			/* Increment detailed receive error statistics. */
			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
				IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
				sp->stats.rx_fifo_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
				IPG_DEBUG_MSG("RX runt occurred.\n");
				sp->stats.rx_length_errors++;
			}

			/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
			 * error count handled by an IPG statistic register.
			 */

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
				IPG_DEBUG_MSG("RX alignment error occurred.\n");
				sp->stats.rx_frame_errors++;
			}

			/* Do nothing for IPG_RFS_RXFCSERROR, error count
			 * handled by an IPG statistic register.
			 */

			/* Free the memory associated with the RX
			 * buffer since it is erroneous and we will
			 * not pass it to higher layer processes.
			 */
			if (skb) {
				__le64 info = rxfd->frag_info;

				pci_unmap_single(sp->pdev,
					le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
					sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

				dev_kfree_skb_irq(skb);
			}
		} else {
			/* Adjust the new buffer length to accommodate the size
			 * of the received frame.
			 */
			skb_put(skb, framelen);

			/* Set the buffer's protocol field to Ethernet. */
			skb->protocol = eth_type_trans(skb, dev);

			/* The IPG encountered an error with (or
			 * there were no) IP/TCP/UDP checksums.
			 * This may or may not indicate an invalid
			 * IP/TCP/UDP frame was received. Let the
			 * upper layer decide.
			 */
			skb->ip_summed = CHECKSUM_NONE;

			/* Hand off frame for higher layer processing.
			 * The function netif_rx() releases the sk_buff
			 * when processing completes.
			 */
			netif_rx(skb);

			/* Record frame receive time (jiffies = Linux
			 * kernel current time stamp).
			 */
			dev->last_rx = jiffies;
		}

		/* Assure RX buffer is not reused by IPG. */
		sp->rx_buff[entry] = NULL;
	}

	/*
	 * If there are more RFDs to process and the allocated amount of RFD
	 * processing time has expired, assert Interrupt Requested to make
	 * sure we come back to process the remaining RFDs.
	 */
	if (i == IPG_MAXRFDPROCESS_COUNT)
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);

	/* Check if the RFD list contained no receive frame data. */
	if (!i)
		sp->EmptyRFDListCount++;

	while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
	       !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
		 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
		unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;

		rxfd = sp->rxd + entry;

		IPG_DEBUG_MSG("Frame requires multiple RFDs.\n");

		/* An unexpected event, additional code needed to handle
		 * properly. So for the time being, just disregard the
		 * frame.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->rx_buff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_irq(sp->rx_buff[entry]);
		}

		/* Assure RX buffer is not reused by IPG. */
		sp->rx_buff[entry] = NULL;
	}

	sp->rx_current = curr;

	/* Check to see if there are a minimum number of used
	 * RFDs before restoring any (should improve performance.)
	 */
	if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
		ipg_nic_rxrestore(dev);

	return 0;
}
#endif
static void ipg_reset_after_host_error(struct work_struct *work)
{
	struct ipg_nic_private *sp =
		container_of(work, struct ipg_nic_private, task.work);
	struct net_device *dev = sp->dev;

	IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));

	/*
	 * Acknowledge HostError interrupt by resetting
	 * IPG DMA functionality.
	 */
	ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

	init_rfdlist(dev);
	init_tfdlist(dev);

	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Cannot recover from PCI error.\n",
		       dev->name);
		schedule_delayed_work(&sp->task, HZ);
	}
}
static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
{
	struct net_device *dev = dev_inst;
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int handled = 0;
	u16 status;

	IPG_DEBUG_MSG("_interrupt_handler\n");

	ipg_nic_rxrestore(dev);

	spin_lock(&sp->lock);

	/* Get interrupt source information, and acknowledge
	 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
	 * IntRequested, MacControlFrame, LinkEvent) interrupts
	 * if issued. Also, all IPG interrupts are disabled by
	 * reading IntStatusAck.
	 */
	status = ipg_r16(INT_STATUS_ACK);

	IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status);

	/* Shared IRQ of remove event. */
	if (!(status & IPG_IS_RSVD_MASK))
		goto out_enable;

	handled = 1;

	if (unlikely(!netif_running(dev)))
		goto out_unlock;

	/* If RFDListEnd interrupt, restore all used RFDs. */
	if (status & IPG_IS_RFD_LIST_END) {
		IPG_DEBUG_MSG("RFDListEnd Interrupt.\n");

		/* The RFD list end indicates an RFD was encountered
		 * with a 0 NextPtr, or with an RFDDone bit set to 1
		 * (indicating the RFD is not ready for use by the
		 * IPG.) Try to restore all RFDs.
		 */
		ipg_nic_rxrestore(dev);

		/* Increment the RFDlistendCount counter. */
		sp->RFDlistendCount++;
	}

	/* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
	 * IntRequested interrupt, process received frames. */
	if ((status & IPG_IS_RX_DMA_PRIORITY) ||
	    (status & IPG_IS_RFD_LIST_END) ||
	    (status & IPG_IS_RX_DMA_COMPLETE) ||
	    (status & IPG_IS_INT_REQUESTED)) {
		/* Increment the RFD list checked counter if interrupted
		 * only to check the RFD list. */
		if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
		     IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
		    (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
		     IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
		     IPG_IS_UPDATE_STATS)))
			sp->RFDListCheckedCount++;

		ipg_nic_rx(dev);
	}

	/* If TxDMAComplete interrupt, free used TFDs. */
	if (status & IPG_IS_TX_DMA_COMPLETE)
		ipg_nic_txfree(dev);

	/* TxComplete interrupts indicate one of numerous actions.
	 * Determine what action to take based on TXSTATUS register.
	 */
	if (status & IPG_IS_TX_COMPLETE)
		ipg_nic_txcleanup(dev);

	/* If UpdateStats interrupt, update Linux Ethernet statistics. */
	if (status & IPG_IS_UPDATE_STATS)
		ipg_nic_get_stats(dev);

	/* If HostError interrupt, reset IPG. */
	if (status & IPG_IS_HOST_ERROR) {
		IPG_DDEBUG_MSG("HostError Interrupt\n");

		schedule_delayed_work(&sp->task, 0);
	}

	/* If LinkEvent interrupt, resolve autonegotiation. */
	if (status & IPG_IS_LINK_EVENT) {
		if (ipg_config_autoneg(dev) < 0)
			printk(KERN_INFO "%s: Auto-negotiation error.\n",
			       dev->name);
	}

	/* If MACCtrlFrame interrupt, do nothing. */
	if (status & IPG_IS_MAC_CTRL_FRAME)
		IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n");

	/* If RxComplete interrupt, do nothing. */
	if (status & IPG_IS_RX_COMPLETE)
		IPG_DEBUG_MSG("RxComplete interrupt.\n");

	/* If RxEarly interrupt, do nothing. */
	if (status & IPG_IS_RX_EARLY)
		IPG_DEBUG_MSG("RxEarly interrupt.\n");

out_enable:
	/* Re-enable IPG interrupts. */
	ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
		IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
		IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
out_unlock:
	spin_unlock(&sp->lock);

	return IRQ_RETVAL(handled);
}
static void ipg_rx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		if (sp->rx_buff[i]) {
			struct ipg_rx *rxfd = sp->rxd + i;

			dev_kfree_skb_irq(sp->rx_buff[i]);
			sp->rx_buff[i] = NULL;
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		}
	}
}
static void ipg_tx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		if (sp->tx_buff[i]) {
			struct ipg_tx *txfd = sp->txd + i;

			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				sp->tx_buff[i]->len, PCI_DMA_TODEVICE);

			dev_kfree_skb_irq(sp->tx_buff[i]);

			sp->tx_buff[i] = NULL;
		}
	}
}
static int ipg_nic_open(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;
	int rc;

	IPG_DEBUG_MSG("_nic_open\n");

	sp->rx_buf_sz = IPG_RXSUPPORT_SIZE;

	/* Check for interrupt line conflicts, and request interrupt
	 * line for IPG.
	 *
	 * IMPORTANT: Disable IPG interrupts prior to registering
	 *            IRQ.
	 */
	ipg_w16(0x0000, INT_ENABLE);

	/* Register the interrupt line to be used by the IPG within
	 * the Linux system.
	 */
	rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED,
			 dev->name, dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error when requesting interrupt.\n",
		       dev->name);
		goto out;
	}

	dev->irq = pdev->irq;

	rc = -ENOMEM;
	sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
				     &sp->rxd_map, GFP_KERNEL);
	if (!sp->rxd)
		goto err_free_irq_0;

	sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
				     &sp->txd_map, GFP_KERNEL);
	if (!sp->txd)
		goto err_free_rx_1;

	rc = init_rfdlist(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_free_tx_2;
	}

	init_tfdlist(dev);

	rc = ipg_io_config(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_release_tfdlist_3;
	}

	/* Resolve autonegotiation. */
	if (ipg_config_autoneg(dev) < 0)
		printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name);

	/* Initialize JUMBO Frame control variable. */
	sp->jumbo.found_start = 0;
	sp->jumbo.current_size = 0;
	dev->mtu = IPG_TXFRAG_SIZE;

	/* Enable transmit and receive operation of the IPG. */
	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	netif_start_queue(dev);
out:
	return rc;

err_release_tfdlist_3:
	ipg_tx_clear(sp);
	ipg_rx_clear(sp);
err_free_tx_2:
	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
err_free_irq_0:
	free_irq(pdev->irq, dev);
	goto out;
}
static int ipg_nic_stop(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;

	IPG_DEBUG_MSG("_nic_stop\n");

	netif_stop_queue(dev);

	IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
	IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->RFDListCheckedCount);
	IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
	IPG_DUMPTFDLIST(dev);

	do {
		(void) ipg_r16(INT_STATUS_ACK);

		ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

		synchronize_irq(pdev->irq);
	} while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);

	ipg_rx_clear(sp);

	ipg_tx_clear(sp);

	pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
	pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);

	free_irq(pdev->irq, dev);

	return 0;
}
static int ipg_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
	unsigned long flags;
	struct ipg_tx *txfd;

	IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");

	/* If in 10Mbps mode, stop the transmit queue so
	 * no more transmit frames are accepted.
	 */
	if (sp->tenmbpsmode)
		netif_stop_queue(dev);

	if (sp->reset_current_tfd) {
		sp->reset_current_tfd = 0;
		entry = 0;
	}

	txfd = sp->txd + entry;

	sp->tx_buff[entry] = skb;

	/* Clear all TFC fields, except TFDDONE. */
	txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

	/* Specify the TFC field within the TFD. */
	txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
		(IPG_TFC_FRAMEID & sp->tx_current) |
		(IPG_TFC_FRAGCOUNT & (1 << 24)));
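
	/* The driver always transmits an sk_buff as a single linear
	 * fragment (no scatter/gather), hence the fragment count of 1
	 * written into the TFC field above.
	 */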
	/* Request TxComplete interrupts at an interval defined
	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
	 * Request TxComplete interrupt for every frame
	 * if in 10Mbps mode to accommodate problem with 10Mbps
	 * processing.
	 */
	if (sp->tenmbpsmode)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
	txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
	/* Based on compilation option, determine if FCS is to be
	 * appended to transmit frame by IPG.
	 */
	if (!(IPG_APPEND_FCS_ON_TX))
		txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);

	/* Based on compilation option, determine if IP, TCP and/or
	 * UDP checksums are to be added to transmit frame by IPG.
	 */
	if (IPG_ADD_IPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);

	if (IPG_ADD_TCPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);

	if (IPG_ADD_UDPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);

	/* Based on compilation option, determine if VLAN tag info is to be
	 * inserted into transmit frame by IPG.
	 */
	if (IPG_INSERT_MANUAL_VLAN_TAG) {
		txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
			((u64) IPG_MANUAL_VLAN_VID << 32) |
			((u64) IPG_MANUAL_VLAN_CFI << 44) |
			((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
	}

	/* The fragment start location within system memory is defined
	 * by the sk_buff structure's data field. The physical address
	 * of this location within the system's virtual memory space
	 * is determined using the IPG_HOST2BUS_MAP function.
	 */
	txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		skb->len, PCI_DMA_TODEVICE));

	/* The length of the fragment within system memory is defined by
	 * the sk_buff structure's len field.
	 */
	txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
		((u64) (skb->len & 0xffff) << 48));

	/* Clear the TFDDone bit last to indicate the TFD is ready
	 * for transfer to the IPG.
	 */
	txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);

	spin_lock_irqsave(&sp->lock, flags);

	sp->tx_current++;

	mmiowb();

	ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
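
	/* Kicking TxDMAPollNow makes the IPG re-read the TFD list
	 * immediately instead of waiting for the poll interval
	 * programmed into TX_DMA_POLL_PERIOD by ipg_io_config().
	 */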
	if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&sp->lock, flags);

	return NETDEV_TX_OK;
}
static void ipg_set_phy_default_param(unsigned char rev,
				      struct net_device *dev, int phy_address)
{
	unsigned short length;
	unsigned char revision;
	unsigned short *phy_param;
	unsigned short address, value;

	phy_param = &DefaultPhyParam[0];
	length = *phy_param & 0x00FF;
	revision = (unsigned char)((*phy_param) >> 8);
	phy_param++;
	while (length != 0) {
		if (rev == revision) {
			while (length > 1) {
				address = *phy_param;
				value = *(phy_param + 1);
				phy_param += 2;
				mdio_write(dev, phy_address, address, value);
				length -= 4;
			}
			break;
		} else {
			phy_param += length / 2;
			length = *phy_param & 0x00FF;
			revision = (unsigned char)((*phy_param) >> 8);
			phy_param++;
		}
	}
}
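
/* Worked example: the v1-4 record in DefaultPhyParam starts with
 * (0x4000 | (07 * 4)), i.e. revision 0x40 and a 28-byte payload of seven
 * address/value pairs; the walk above writes those pairs via mdio_write()
 * and stops at the all-zero terminator record.
 */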
static int read_eeprom(struct net_device *dev, int eep_addr)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	unsigned int i;
	int ret = 0;
	u16 value;

	value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
	ipg_w16(value, EEPROM_CTRL);

	for (i = 0; i < 1000; i++) {
		u16 data;

		mdelay(10);
		data = ipg_r16(EEPROM_CTRL);
		if (!(data & IPG_EC_EEPROM_BUSY)) {
			ret = ipg_r16(EEPROM_DATA);
			break;
		}
	}
	return ret;
}
static void ipg_init_mii(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct mii_if_info *mii_if = &sp->mii_if;
	int phyaddr;

	mii_if->dev = dev;
	mii_if->mdio_read = mdio_read;
	mii_if->mdio_write = mdio_write;
	mii_if->phy_id_mask = 0x1f;
	mii_if->reg_num_mask = 0x1f;

	mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);

	if (phyaddr != 0x1f) {
		u16 mii_phyctrl, mii_1000cr;
		u8 revisionid = 0;

		mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
		mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
			GMII_PHY_1000BASETCONTROL_PreferMaster;
		mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);

		mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);

		/* Set default phyparam. */
		pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid);
		ipg_set_phy_default_param(revisionid, dev, phyaddr);

		/* Reset PHY. */
		mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
		mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
	}
}
static int ipg_hw_init(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	int rc;

	/* Read/Write and Reset EEPROM Value. */
	/* Read LED Mode Configuration from EEPROM. */
	sp->led_mode = read_eeprom(dev, 6);

	/* Reset all functions within the IPG. Do not assert
	 * RST_OUT as not compatible with some PHYs.
	 */
	rc = ipg_reset(dev, IPG_RESET_MASK);
	if (rc < 0)
		goto out;

	ipg_init_mii(dev);

	/* Read MAC Address from EEPROM. */
	for (i = 0; i < 3; i++)
		sp->station_addr[i] = read_eeprom(dev, 16 + i);

	for (i = 0; i < 3; i++)
		ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);

	/* Set station address in ethernet_device structure. */
	dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
	dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
	dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
	dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
	dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
	dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
out:
	return rc;
}
static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}
static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Function to accommodate changes to Maximum Transfer Unit
	 * (or MTU) of IPG NIC. Cannot use default function since
	 * the default will not allow for MTU > 1500 bytes.
	 */

	IPG_DEBUG_MSG("_nic_change_mtu\n");

	/* Check that the new MTU value is between 68 (the minimum
	 * IPv4 MTU) and IPG_MAX_RXFRAME_SIZE, which corresponds to
	 * the MAXFRAMESIZE register in the IPG.
	 */
	if ((new_mtu < 68) || (new_mtu > IPG_MAX_RXFRAME_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}
static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_gset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_sset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static int ipg_nway_reset(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_nway_restart(&sp->mii_if);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}
static struct ethtool_ops ipg_ethtool_ops = {
	.get_settings = ipg_get_settings,
	.set_settings = ipg_set_settings,
	.nway_reset   = ipg_nway_reset,
};
static void __devexit ipg_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ipg_nic_private *sp = netdev_priv(dev);

	IPG_DEBUG_MSG("_remove\n");

	/* Un-register Ethernet device. */
	unregister_netdev(dev);

	pci_iounmap(pdev, sp->ioaddr);

	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int __devinit ipg_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	unsigned int i = id->driver_data;
	struct ipg_nic_private *sp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
	if (rc < 0) {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc < 0) {
			printk(KERN_ERR "%s: DMA config failed.\n",
			       pci_name(pdev));
			goto err_disable_0;
		}
	}

	/*
	 * Initialize net device.
	 */
	dev = alloc_etherdev(sizeof(struct ipg_nic_private));
	if (!dev) {
		printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev));
		rc = -ENOMEM;
		goto err_disable_0;
	}

	sp = netdev_priv(dev);
	spin_lock_init(&sp->lock);
	mutex_init(&sp->mii_mutex);

	/* Declare IPG NIC functions for Ethernet device methods. */
	dev->open = &ipg_nic_open;
	dev->stop = &ipg_nic_stop;
	dev->hard_start_xmit = &ipg_nic_hard_start_xmit;
	dev->get_stats = &ipg_nic_get_stats;
	dev->set_multicast_list = &ipg_nic_set_multicast_list;
	dev->do_ioctl = ipg_ioctl;
	dev->tx_timeout = ipg_tx_timeout;
	dev->change_mtu = &ipg_nic_change_mtu;

	SET_NETDEV_DEV(dev, &pdev->dev);
	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_free_dev_1;

	ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
	if (!ioaddr) {
		printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev));
		rc = -EIO;
		goto err_release_regions_2;
	}

	/* Save the pointer to the PCI device information. */
	sp->ioaddr = ioaddr;
	sp->pdev = pdev;
	sp->dev = dev;

	INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);

	pci_set_drvdata(pdev, dev);

	rc = ipg_hw_init(dev);
	if (rc < 0)
		goto err_unmap_3;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_unmap_3;

	printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name);
out:
	return rc;

err_unmap_3:
	pci_iounmap(pdev, ioaddr);
err_release_regions_2:
	pci_release_regions(pdev);
err_free_dev_1:
	free_netdev(dev);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}
static struct pci_driver ipg_pci_driver = {
	.name		= IPG_DRIVER_NAME,
	.id_table	= ipg_pci_tbl,
	.probe		= ipg_probe,
	.remove		= __devexit_p(ipg_remove),
};

static int __init ipg_init_module(void)
{
	return pci_register_driver(&ipg_pci_driver);
}

static void __exit ipg_exit_module(void)
{
	pci_unregister_driver(&ipg_pci_driver);
}

module_init(ipg_init_module);
module_exit(ipg_exit_module);