1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
30 Linux kernel version history:
33 - Jeff Garzik: softnet 'n stuff
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Becker's 1.05 version
56 added netif_running_on/off support
59 - Urban Widmark: merges from Becker's 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
63 - Manfred Spraul: added reset into tx_timeout
66 - Urban Widmark: merges from Becker's 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
85 - Replace some MII-related magic numbers with constants
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
105 - Use new MII lib helper generic_mii_ioctl
107 LK1.1.16 (Roger Luethi)
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
128 LK1.2.0-2.6 (Roger Luethi)
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good
135 #define DRV_NAME "via-rhine"
136 #define DRV_VERSION "1.2.0-2.6"
137 #define DRV_RELDATE "June-10-2004"
140 /* A few user-configurable values.
141 These may be modified when a driver module is loaded. */
143 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
144 static int max_interrupt_work = 20;
146 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
147 Setting to > 1518 effectively disables this feature. */
148 static int rx_copybreak;
151 * In case you are looking for 'options[]' or 'full_duplex[]', they
152 * are gone. Use ethtool(8) instead.
155 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
156 The Rhine has a 64 element 8390-like hash table. */
157 static const int multicast_filter_limit = 32;
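/*
 * Sketch of how the 64-entry hash is used (illustrative only; the real
 * code is in rhine_set_rx_mode() below): the top six bits of the
 * Ethernet CRC of a multicast address ('addr' here is hypothetical)
 * select one bit across the two 32-bit MulticastFilter registers:
 *
 *	bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */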
160 /* Operational parameters that are set at compile time. */
162 /* Keep the ring sizes a power of two for compile efficiency.
163 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
164 Making the Tx ring too large decreases the effectiveness of channel
165 bonding and packet priority.
166 There are no ill effects from too-large receive rings. */
167 #define TX_RING_SIZE 16
168 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
169 #define RX_RING_SIZE	16
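/*
 * Illustrative sketch, not used by the driver: with the power-of-two
 * sizes above, the '%' used for ring indexing reduces to a plain AND
 * mask. The helper name is hypothetical.
 */
static inline unsigned int example_tx_ring_index(unsigned int cur_tx)
{
	return cur_tx % TX_RING_SIZE;	/* same as cur_tx & (TX_RING_SIZE - 1) */
}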
172 /* Operational parameters that usually are not changed. */
174 /* Time in jiffies before concluding the transmitter is hung. */
175 #define TX_TIMEOUT (2*HZ)
177 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
179 #include <linux/module.h>
180 #include <linux/moduleparam.h>
181 #include <linux/kernel.h>
182 #include <linux/string.h>
183 #include <linux/timer.h>
184 #include <linux/errno.h>
185 #include <linux/ioport.h>
186 #include <linux/slab.h>
187 #include <linux/interrupt.h>
188 #include <linux/pci.h>
189 #include <linux/dma-mapping.h>
190 #include <linux/netdevice.h>
191 #include <linux/etherdevice.h>
192 #include <linux/skbuff.h>
193 #include <linux/init.h>
194 #include <linux/delay.h>
195 #include <linux/mii.h>
196 #include <linux/ethtool.h>
197 #include <linux/crc32.h>
198 #include <linux/bitops.h>
199 #include <asm/processor.h> /* Processor type for cache alignment. */
202 #include <asm/uaccess.h>
204 /* These identify the driver base version and may not be removed. */
205 static char version[] __devinitdata =
206 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
208 /* This driver was written to use PCI memory space. Some early versions
209 of the Rhine may only work correctly with I/O space accesses. */
210 #ifdef CONFIG_VIA_RHINE_MMIO
215 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
216 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
217 MODULE_LICENSE("GPL");
219 module_param(max_interrupt_work, int, 0);
220 module_param(debug, int, 0);
221 module_param(rx_copybreak, int, 0);
222 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
223 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
224 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
229 I. Board Compatibility
231 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet adapter and later chips in the Rhine family.
234 II. Board-specific settings
236 Boards with this chip are functional only in a bus-master PCI slot.
238 Many operational settings are loaded from the EEPROM to the Config word at
239 offset 0x78. For most of these settings, this driver assumes that they are correct.
241 If this driver is compiled to use PCI memory space operations the EEPROM
242 must be configured to enable memory ops.
244 III. Driver operation
248 This driver uses two statically allocated fixed-size descriptor lists
249 formed into rings by a branch from the final descriptor to the beginning of
250 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
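Concretely, the branch back is just the last descriptor's next pointer
set to the ring's DMA base address, e.g. for the Rx ring (as done in
alloc_rbufs() below):

	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);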
252 IIIb/c. Transmit/Receive Structure
254 This driver attempts to use a zero-copy receive and transmit scheme.
256 Alas, all data buffers are required to start on a 32 bit boundary, so
257 the driver must often copy transmit packets into bounce buffers.
259 The driver allocates full frame size skbuffs for the Rx ring buffers at
260 open() time and passes the skb->data field to the chip as receive data
261 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
262 a fresh skbuff is allocated and the frame is copied to the new skbuff.
263 When the incoming frame is larger, the skbuff is passed directly up the
264 protocol stack. Buffers consumed this way are replaced by newly allocated
265 skbuffs in the last phase of rhine_rx().
267 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
268 using a full-sized skbuff for small frames vs. the copying costs of larger
269 frames. New boards are typically used in generously configured machines
270 and the underfilled buffers have negligible impact compared to the benefit of
271 a single allocation size, so the default value of zero results in never
272 copying packets. When copying is done, the cost is usually mitigated by using
273 a combined copy/checksum routine. Copying also preloads the cache, which is
274 most useful with small frames.
276 Since the VIA chips are only able to transfer data to buffers on 32 bit
277 boundaries, the IP header at offset 14 in an ethernet frame isn't
278 longword aligned for further processing. Copying these unaligned buffers
279 has the beneficial effect of 16-byte aligning the IP header.
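For illustration, the copy-break receive path described above reduces to
roughly this (simplified from rhine_rx() below; the two reserved bytes
are what 16-byte align the IP header):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);
		eth_copy_and_sum(skb, rp->rx_skbuff[entry]->data, pkt_len, 0);
		skb_put(skb, pkt_len);
	}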
281 IIId. Synchronization
283 The driver runs as two independent, single-threaded flows of control. One
284 is the send-packet routine, which enforces single-threaded use by the
285 dev->priv->lock spinlock. The other thread is the interrupt handler, which
286 is single threaded by the hardware and interrupt handling software.
288 The send packet thread has partial control over the Tx ring. It locks the
289 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
290 is not available it stops the transmit queue by calling netif_stop_queue.
292 The interrupt handler has exclusive control over the Rx ring and records stats
293 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
294 empty by incrementing the dirty_tx mark. If at least half of the entries in
295 the Tx ring are available, the transmit queue is woken up if it was stopped.
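As a rough sketch, the transmit-side locking described above looks like
this in rhine_start_tx() (simplified):

	spin_lock_irq(&rp->lock);
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	rp->cur_tx++;
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);
	spin_unlock_irq(&rp->lock);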
301 Preliminary VT86C100A manual from http://www.via.com.tw/
302 http://www.scyld.com/expert/100mbps.html
303 http://www.scyld.com/expert/NWay.html
304 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
305 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
310 The VT86C100A manual is not a reliable source of information.
311 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
312 in significant performance degradation for bounce buffer copies on transmit
313 and unaligned IP headers on receive.
314 The chip does not pad to minimum transmit length.
319 /* This table drives the PCI probe routines. It's mostly boilerplate in all
320 of the drivers, and will likely be provided by some future kernel.
321 Note the matching code -- the first table entry matches all 56** cards but
322 the second matches only the 1234 card.
329 VT8231 = 0x50, /* Integrated MAC */
330 VT8233 = 0x60, /* Integrated MAC */
331 VT8235 = 0x74, /* Integrated MAC */
332 VT8237 = 0x78, /* Integrated MAC */
339 VT6105M = 0x90, /* Management adapter */
343 rqWOL = 0x0001, /* Wake-On-LAN support */
344 rqForceReset = 0x0002,
345 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
346 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
347 rqRhineI = 0x0100, /* See comment below */
350 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
351 * MMIO as well as for the collision counter and the Tx FIFO underflow
352 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
355 /* Beware of PCI posted writes */
356 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
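/*
 * Usage sketch (illustrative, not an actual call site): after an iowrite
 * that must reach the chip before execution continues, a dummy read via
 * IOSYNC flushes any posted PCI write out to the device:
 *
 *	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, ioaddr + ChipCmd1);
 *	IOSYNC;
 */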
358 static struct pci_device_id rhine_pci_tbl[] =
360 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
361 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
362 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
363 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
364 { } /* terminate list */
366 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
369 /* Offsets to the device registers. */
370 enum register_offsets {
371 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
373 IntrStatus=0x0C, IntrEnable=0x0E,
374 MulticastFilter0=0x10, MulticastFilter1=0x14,
375 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
376 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
377 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
378 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
379 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
380 StickyHW=0x83, IntrStatus2=0x84,
381 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
382 WOLcrClr1=0xA6, WOLcgClr=0xA7,
383 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
386 /* Bits in ConfigD */
388 BackOptional=0x01, BackModify=0x02,
389 BackCaptureEffect=0x04, BackRandom=0x08
393 /* Registers we read back to check that MMIO and PIO accesses return the same values. */
394 static const int mmio_verify_registers[] = {
395 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
400 /* Bits in the interrupt status/mask registers. */
401 enum intr_status_bits {
402 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
403 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
405 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
406 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
407 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
409 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
410 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
411 IntrTxErrSummary=0x082218,
414 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
423 /* The Rx and Tx buffer descriptors. */
426 u32 desc_length; /* Chain flag, Buffer/frame length */
432 u32 desc_length; /* Chain flag, Tx Config, Frame length */
437 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
438 #define TXDESC 0x00e08000
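/*
 * Illustrative sketch, not used by the driver: the desc_length word for
 * a Tx descriptor is TXDESC with the (minimum-padded) frame length OR'ed
 * into the low bits, as rhine_start_tx() does before cpu_to_le32().
 * The helper name is hypothetical.
 */
static inline u32 example_tx_desc_length(unsigned int len)
{
	return TXDESC | (len >= ETH_ZLEN ? len : ETH_ZLEN);
}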
440 enum rx_status_bits {
441 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
444 /* Bits in *_desc.*_status */
445 enum desc_status_bits {
449 /* Bits in ChipCmd. */
451 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
452 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
453 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
454 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
457 struct rhine_private {
458 /* Descriptor rings */
459 struct rx_desc *rx_ring;
460 struct tx_desc *tx_ring;
461 dma_addr_t rx_ring_dma;
462 dma_addr_t tx_ring_dma;
464 /* The addresses of receive-in-place skbuffs. */
465 struct sk_buff *rx_skbuff[RX_RING_SIZE];
466 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
468 /* The saved address of a sent-in-place packet/buffer, for later free(). */
469 struct sk_buff *tx_skbuff[TX_RING_SIZE];
470 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
472 /* Tx bounce buffers */
473 unsigned char *tx_buf[TX_RING_SIZE];
474 unsigned char *tx_bufs;
475 dma_addr_t tx_bufs_dma;
477 struct pci_dev *pdev;
479 struct net_device_stats stats;
482 /* Frequently used values: keep some adjacent for cache effect. */
484 struct rx_desc *rx_head_desc;
485 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
486 unsigned int cur_tx, dirty_tx;
487 unsigned int rx_buf_sz; /* Based on MTU+slack. */
490 u8 tx_thresh, rx_thresh;
492 struct mii_if_info mii_if;
496 static int mdio_read(struct net_device *dev, int phy_id, int location);
497 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
498 static int rhine_open(struct net_device *dev);
499 static void rhine_tx_timeout(struct net_device *dev);
500 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
501 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
502 static void rhine_tx(struct net_device *dev);
503 static void rhine_rx(struct net_device *dev);
504 static void rhine_error(struct net_device *dev, int intr_status);
505 static void rhine_set_rx_mode(struct net_device *dev);
506 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
507 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
508 static struct ethtool_ops netdev_ethtool_ops;
509 static int rhine_close(struct net_device *dev);
510 static void rhine_shutdown (struct pci_dev *pdev);
512 #define RHINE_WAIT_FOR(condition) do { \
514 while (!(condition) && --i) \
516 if (debug > 1 && i < 512) \
517 printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
518 DRV_NAME, 1024-i, __func__, __LINE__); \
521 static inline u32 get_intr_status(struct net_device *dev)
523 struct rhine_private *rp = netdev_priv(dev);
524 void __iomem *ioaddr = rp->base;
527 intr_status = ioread16(ioaddr + IntrStatus);
528 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
529 if (rp->quirks & rqStatusWBRace)
530 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
535 * Get power-related registers into a sane state.
536 * Notify user about past WOL event.
538 static void rhine_power_init(struct net_device *dev)
540 struct rhine_private *rp = netdev_priv(dev);
541 void __iomem *ioaddr = rp->base;
544 if (rp->quirks & rqWOL) {
545 /* Make sure chip is in power state D0 */
546 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
548 /* Disable "force PME-enable" */
549 iowrite8(0x80, ioaddr + WOLcgClr);
551 /* Clear power-event config bits (WOL) */
552 iowrite8(0xFF, ioaddr + WOLcrClr);
553 /* More recent cards can manage two additional patterns */
554 if (rp->quirks & rq6patterns)
555 iowrite8(0x03, ioaddr + WOLcrClr1);
557 /* Save power-event status bits */
558 wolstat = ioread8(ioaddr + PwrcsrSet);
559 if (rp->quirks & rq6patterns)
560 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
562 /* Clear power-event status bits */
563 iowrite8(0xFF, ioaddr + PwrcsrClr);
564 if (rp->quirks & rq6patterns)
565 iowrite8(0x03, ioaddr + PwrcsrClr1);
571 reason = "Magic packet";
574 reason = "Link went up";
577 reason = "Link went down";
580 reason = "Unicast packet";
583 reason = "Multicast/broadcast packet";
588 printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
594 static void rhine_chip_reset(struct net_device *dev)
596 struct rhine_private *rp = netdev_priv(dev);
597 void __iomem *ioaddr = rp->base;
599 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
602 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
603 printk(KERN_INFO "%s: Reset not complete yet. "
604 "Trying harder.\n", DRV_NAME);
607 if (rp->quirks & rqForceReset)
608 iowrite8(0x40, ioaddr + MiscCmd);
610 /* Reset can take somewhat longer (rare) */
611 RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
615 printk(KERN_INFO "%s: Reset %s.\n", dev->name,
616 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
617 "failed" : "succeeded");
621 static void enable_mmio(long pioaddr, u32 quirks)
624 if (quirks & rqRhineI) {
625 /* More recent docs say that this bit is reserved ... */
626 n = inb(pioaddr + ConfigA) | 0x20;
627 outb(n, pioaddr + ConfigA);
629 n = inb(pioaddr + ConfigD) | 0x80;
630 outb(n, pioaddr + ConfigD);
636 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
637 * (plus 0x6C for Rhine-I/II)
639 static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
641 struct rhine_private *rp = netdev_priv(dev);
642 void __iomem *ioaddr = rp->base;
644 outb(0x20, pioaddr + MACRegEEcsr);
645 RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
649 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
650 * MMIO. If reloading EEPROM was done first this could be avoided, but
651 * it is not known if that still works with the "win98-reboot" problem.
653 enable_mmio(pioaddr, rp->quirks);
656 /* Turn off EEPROM-controlled wake-up (magic packet) */
657 if (rp->quirks & rqWOL)
658 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
662 #ifdef CONFIG_NET_POLL_CONTROLLER
663 static void rhine_poll(struct net_device *dev)
665 disable_irq(dev->irq);
666 rhine_interrupt(dev->irq, (void *)dev, NULL);
667 enable_irq(dev->irq);
671 static void rhine_hw_init(struct net_device *dev, long pioaddr)
673 struct rhine_private *rp = netdev_priv(dev);
675 /* Reset the chip to erase previous misconfiguration. */
676 rhine_chip_reset(dev);
678 /* Rhine-I needs extra time to recuperate before EEPROM reload */
679 if (rp->quirks & rqRhineI)
682 /* Reload EEPROM controlled bytes cleared by soft reset */
683 rhine_reload_eeprom(pioaddr, dev);
686 static int __devinit rhine_init_one(struct pci_dev *pdev,
687 const struct pci_device_id *ent)
689 struct net_device *dev;
690 struct rhine_private *rp;
696 void __iomem *ioaddr;
705 /* when built into the kernel, we only print version if device is found */
707 static int printed_version;
708 if (!printed_version++)
712 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
718 if (pci_rev < VTunknown0) {
722 else if (pci_rev >= VT6102) {
723 quirks = rqWOL | rqForceReset;
724 if (pci_rev < VT6105) {
726 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
729 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
730 if (pci_rev >= VT6105_B0)
731 quirks |= rq6patterns;
732 if (pci_rev < VT6105M)
735 name = "Rhine III (Management Adapter)";
739 rc = pci_enable_device(pdev);
743 /* this should always be supported */
744 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
746 printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
752 if ((pci_resource_len(pdev, 0) < io_size) ||
753 (pci_resource_len(pdev, 1) < io_size)) {
755 printk(KERN_ERR "Insufficient PCI resources, aborting\n");
759 pioaddr = pci_resource_start(pdev, 0);
760 memaddr = pci_resource_start(pdev, 1);
762 pci_set_master(pdev);
764 dev = alloc_etherdev(sizeof(struct rhine_private));
767 printk(KERN_ERR "alloc_etherdev failed\n");
770 SET_MODULE_OWNER(dev);
771 SET_NETDEV_DEV(dev, &pdev->dev);
773 rp = netdev_priv(dev);
775 rp->pioaddr = pioaddr;
778 rc = pci_request_regions(pdev, DRV_NAME);
780 goto err_out_free_netdev;
782 ioaddr = pci_iomap(pdev, bar, io_size);
785 printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
786 "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
787 goto err_out_free_res;
791 enable_mmio(pioaddr, quirks);
793 /* Check that selected MMIO registers match the PIO ones */
795 while (mmio_verify_registers[i]) {
796 int reg = mmio_verify_registers[i++];
797 unsigned char a = inb(pioaddr+reg);
798 unsigned char b = readb(ioaddr+reg);
801 printk(KERN_ERR "MMIO do not match PIO [%02x] "
802 "(%02x != %02x)\n", reg, a, b);
806 #endif /* USE_MMIO */
808 dev->base_addr = (unsigned long)ioaddr;
811 /* Get chip registers into a sane state */
812 rhine_power_init(dev);
813 rhine_hw_init(dev, pioaddr);
815 for (i = 0; i < 6; i++)
816 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
817 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
819 if (!is_valid_ether_addr(dev->perm_addr)) {
821 printk(KERN_ERR "Invalid MAC address\n");
825 /* For Rhine-I/II, phy_id is loaded from EEPROM */
827 phy_id = ioread8(ioaddr + 0x6C);
829 dev->irq = pdev->irq;
831 spin_lock_init(&rp->lock);
832 rp->mii_if.dev = dev;
833 rp->mii_if.mdio_read = mdio_read;
834 rp->mii_if.mdio_write = mdio_write;
835 rp->mii_if.phy_id_mask = 0x1f;
836 rp->mii_if.reg_num_mask = 0x1f;
838 /* The chip-specific entries in the device structure. */
839 dev->open = rhine_open;
840 dev->hard_start_xmit = rhine_start_tx;
841 dev->stop = rhine_close;
842 dev->get_stats = rhine_get_stats;
843 dev->set_multicast_list = rhine_set_rx_mode;
844 dev->do_ioctl = netdev_ioctl;
845 dev->ethtool_ops = &netdev_ethtool_ops;
846 dev->tx_timeout = rhine_tx_timeout;
847 dev->watchdog_timeo = TX_TIMEOUT;
848 #ifdef CONFIG_NET_POLL_CONTROLLER
849 dev->poll_controller = rhine_poll;
851 if (rp->quirks & rqRhineI)
852 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
854 /* dev->name not defined before register_netdev()! */
855 rc = register_netdev(dev);
859 printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
868 for (i = 0; i < 5; i++)
869 printk("%2.2x:", dev->dev_addr[i]);
870 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
872 pci_set_drvdata(pdev, dev);
876 int mii_status = mdio_read(dev, phy_id, 1);
877 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
878 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
879 if (mii_status != 0xffff && mii_status != 0x0000) {
880 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
881 printk(KERN_INFO "%s: MII PHY found at address "
882 "%d, status 0x%4.4x advertising %4.4x "
883 "Link %4.4x.\n", dev->name, phy_id,
884 mii_status, rp->mii_if.advertising,
885 mdio_read(dev, phy_id, 5));
887 /* set IFF_RUNNING */
888 if (mii_status & BMSR_LSTATUS)
889 netif_carrier_on(dev);
891 netif_carrier_off(dev);
895 rp->mii_if.phy_id = phy_id;
900 pci_iounmap(pdev, ioaddr);
902 pci_release_regions(pdev);
909 static int alloc_ring(struct net_device* dev)
911 struct rhine_private *rp = netdev_priv(dev);
915 ring = pci_alloc_consistent(rp->pdev,
916 RX_RING_SIZE * sizeof(struct rx_desc) +
917 TX_RING_SIZE * sizeof(struct tx_desc),
920 printk(KERN_ERR "Could not allocate DMA memory.\n");
923 if (rp->quirks & rqRhineI) {
924 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
925 PKT_BUF_SZ * TX_RING_SIZE,
927 if (rp->tx_bufs == NULL) {
928 pci_free_consistent(rp->pdev,
929 RX_RING_SIZE * sizeof(struct rx_desc) +
930 TX_RING_SIZE * sizeof(struct tx_desc),
937 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
938 rp->rx_ring_dma = ring_dma;
939 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
944 static void free_ring(struct net_device* dev)
946 struct rhine_private *rp = netdev_priv(dev);
948 pci_free_consistent(rp->pdev,
949 RX_RING_SIZE * sizeof(struct rx_desc) +
950 TX_RING_SIZE * sizeof(struct tx_desc),
951 rp->rx_ring, rp->rx_ring_dma);
955 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
956 rp->tx_bufs, rp->tx_bufs_dma);
962 static void alloc_rbufs(struct net_device *dev)
964 struct rhine_private *rp = netdev_priv(dev);
968 rp->dirty_rx = rp->cur_rx = 0;
970 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
971 rp->rx_head_desc = &rp->rx_ring[0];
972 next = rp->rx_ring_dma;
974 /* Init the ring entries */
975 for (i = 0; i < RX_RING_SIZE; i++) {
976 rp->rx_ring[i].rx_status = 0;
977 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
978 next += sizeof(struct rx_desc);
979 rp->rx_ring[i].next_desc = cpu_to_le32(next);
980 rp->rx_skbuff[i] = NULL;
982 /* Mark the last entry as wrapping the ring. */
983 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
985 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
986 for (i = 0; i < RX_RING_SIZE; i++) {
987 struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
988 rp->rx_skbuff[i] = skb;
991 skb->dev = dev; /* Mark as being used by this device. */
993 rp->rx_skbuff_dma[i] =
994 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
997 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
998 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1000 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1003 static void free_rbufs(struct net_device* dev)
1005 struct rhine_private *rp = netdev_priv(dev);
1008 /* Free all the skbuffs in the Rx queue. */
1009 for (i = 0; i < RX_RING_SIZE; i++) {
1010 rp->rx_ring[i].rx_status = 0;
1011 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1012 if (rp->rx_skbuff[i]) {
1013 pci_unmap_single(rp->pdev,
1014 rp->rx_skbuff_dma[i],
1015 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1016 dev_kfree_skb(rp->rx_skbuff[i]);
1018 rp->rx_skbuff[i] = NULL;
1022 static void alloc_tbufs(struct net_device* dev)
1024 struct rhine_private *rp = netdev_priv(dev);
1028 rp->dirty_tx = rp->cur_tx = 0;
1029 next = rp->tx_ring_dma;
1030 for (i = 0; i < TX_RING_SIZE; i++) {
1031 rp->tx_skbuff[i] = NULL;
1032 rp->tx_ring[i].tx_status = 0;
1033 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1034 next += sizeof(struct tx_desc);
1035 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1036 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1038 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1042 static void free_tbufs(struct net_device* dev)
1044 struct rhine_private *rp = netdev_priv(dev);
1047 for (i = 0; i < TX_RING_SIZE; i++) {
1048 rp->tx_ring[i].tx_status = 0;
1049 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1050 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1051 if (rp->tx_skbuff[i]) {
1052 if (rp->tx_skbuff_dma[i]) {
1053 pci_unmap_single(rp->pdev,
1054 rp->tx_skbuff_dma[i],
1055 rp->tx_skbuff[i]->len,
1058 dev_kfree_skb(rp->tx_skbuff[i]);
1060 rp->tx_skbuff[i] = NULL;
1061 rp->tx_buf[i] = NULL;
1065 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1067 struct rhine_private *rp = netdev_priv(dev);
1068 void __iomem *ioaddr = rp->base;
1070 mii_check_media(&rp->mii_if, debug, init_media);
1072 if (rp->mii_if.full_duplex)
1073 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1076 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1080 static void init_registers(struct net_device *dev)
1082 struct rhine_private *rp = netdev_priv(dev);
1083 void __iomem *ioaddr = rp->base;
1086 for (i = 0; i < 6; i++)
1087 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1089 /* Initialize other registers. */
1090 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1091 /* Configure initial FIFO thresholds. */
1092 iowrite8(0x20, ioaddr + TxConfig);
1093 rp->tx_thresh = 0x20;
1094 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1096 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1097 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1099 rhine_set_rx_mode(dev);
1101 /* Enable interrupts by setting the interrupt mask. */
1102 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1103 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1104 IntrTxDone | IntrTxError | IntrTxUnderrun |
1105 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1106 ioaddr + IntrEnable);
1108 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1110 rhine_check_media(dev, 1);
1113 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1114 static void rhine_enable_linkmon(void __iomem *ioaddr)
1116 iowrite8(0, ioaddr + MIICmd);
1117 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1118 iowrite8(0x80, ioaddr + MIICmd);
1120 RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
1122 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1125 /* Disable MII link status auto-polling (required for MDIO access) */
1126 static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1128 iowrite8(0, ioaddr + MIICmd);
1130 if (quirks & rqRhineI) {
1131 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1133 /* Can be called from ISR. Evil. */
1136 /* 0x80 must be set immediately before turning it off */
1137 iowrite8(0x80, ioaddr + MIICmd);
1139 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1141 /* Heh. Now clear 0x80 again. */
1142 iowrite8(0, ioaddr + MIICmd);
1145 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1148 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1150 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1152 struct rhine_private *rp = netdev_priv(dev);
1153 void __iomem *ioaddr = rp->base;
1156 rhine_disable_linkmon(ioaddr, rp->quirks);
1158 /* rhine_disable_linkmon already cleared MIICmd */
1159 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1160 iowrite8(regnum, ioaddr + MIIRegAddr);
1161 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1162 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1163 result = ioread16(ioaddr + MIIData);
1165 rhine_enable_linkmon(ioaddr);
1169 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1171 struct rhine_private *rp = netdev_priv(dev);
1172 void __iomem *ioaddr = rp->base;
1174 rhine_disable_linkmon(ioaddr, rp->quirks);
1176 /* rhine_disable_linkmon already cleared MIICmd */
1177 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1178 iowrite8(regnum, ioaddr + MIIRegAddr);
1179 iowrite16(value, ioaddr + MIIData);
1180 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1181 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1183 rhine_enable_linkmon(ioaddr);
1186 static int rhine_open(struct net_device *dev)
1188 struct rhine_private *rp = netdev_priv(dev);
1189 void __iomem *ioaddr = rp->base;
1192 rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
1198 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1199 dev->name, rp->pdev->irq);
1201 rc = alloc_ring(dev);
1203 free_irq(rp->pdev->irq, dev);
1208 rhine_chip_reset(dev);
1209 init_registers(dev);
1211 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1212 "MII status: %4.4x.\n",
1213 dev->name, ioread16(ioaddr + ChipCmd),
1214 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1216 netif_start_queue(dev);
1221 static void rhine_tx_timeout(struct net_device *dev)
1223 struct rhine_private *rp = netdev_priv(dev);
1224 void __iomem *ioaddr = rp->base;
1226 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1227 "%4.4x, resetting...\n",
1228 dev->name, ioread16(ioaddr + IntrStatus),
1229 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1231 /* protect against concurrent rx interrupts */
1232 disable_irq(rp->pdev->irq);
1234 spin_lock(&rp->lock);
1236 /* clear all descriptors */
1242 /* Reinitialize the hardware. */
1243 rhine_chip_reset(dev);
1244 init_registers(dev);
1246 spin_unlock(&rp->lock);
1247 enable_irq(rp->pdev->irq);
1249 dev->trans_start = jiffies;
1250 rp->stats.tx_errors++;
1251 netif_wake_queue(dev);
1254 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1256 struct rhine_private *rp = netdev_priv(dev);
1257 void __iomem *ioaddr = rp->base;
1260 /* Caution: the write order is important here, set the field
1261 with the "ownership" bits last. */
1263 /* Calculate the next Tx descriptor entry. */
1264 entry = rp->cur_tx % TX_RING_SIZE;
1266 if (skb->len < ETH_ZLEN) {
1267 skb = skb_padto(skb, ETH_ZLEN);
1272 rp->tx_skbuff[entry] = skb;
1274 if ((rp->quirks & rqRhineI) &&
1275 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
1276 /* Must use alignment buffer. */
1277 if (skb->len > PKT_BUF_SZ) {
1278 /* packet too long, drop it */
1280 rp->tx_skbuff[entry] = NULL;
1281 rp->stats.tx_dropped++;
1284 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1285 rp->tx_skbuff_dma[entry] = 0;
1286 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1287 (rp->tx_buf[entry] -
1290 rp->tx_skbuff_dma[entry] =
1291 pci_map_single(rp->pdev, skb->data, skb->len,
1293 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1296 rp->tx_ring[entry].desc_length =
1297 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1300 spin_lock_irq(&rp->lock);
1302 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1307 /* Non-x86 Todo: explicitly flush cache lines here. */
1309 /* Wake the potentially-idle transmit channel */
1310 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1314 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1315 netif_stop_queue(dev);
1317 dev->trans_start = jiffies;
1319 spin_unlock_irq(&rp->lock);
1322 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1323 dev->name, rp->cur_tx-1, entry);
1328 /* The interrupt handler does all of the Rx thread work and cleans up
1329 after the Tx thread. */
1330 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1332 struct net_device *dev = dev_instance;
1333 struct rhine_private *rp = netdev_priv(dev);
1334 void __iomem *ioaddr = rp->base;
1336 int boguscnt = max_interrupt_work;
1339 while ((intr_status = get_intr_status(dev))) {
1342 /* Acknowledge all of the current interrupt sources ASAP. */
1343 if (intr_status & IntrTxDescRace)
1344 iowrite8(0x08, ioaddr + IntrStatus2);
1345 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1349 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1350 dev->name, intr_status);
1352 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1353 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1356 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1357 if (intr_status & IntrTxErrSummary) {
1358 /* Avoid scavenging before Tx engine turned off */
1359 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1361 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1362 printk(KERN_WARNING "%s: "
1363 "rhine_interrupt() Tx engine "
1364 "still on.\n", dev->name);
1369 /* Abnormal error summary/uncommon events handlers. */
1370 if (intr_status & (IntrPCIErr | IntrLinkChange |
1371 IntrStatsMax | IntrTxError | IntrTxAborted |
1372 IntrTxUnderrun | IntrTxDescRace))
1373 rhine_error(dev, intr_status);
1375 if (--boguscnt < 0) {
1376 printk(KERN_WARNING "%s: Too much work at interrupt, "
1378 dev->name, intr_status);
1384 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1385 dev->name, ioread16(ioaddr + IntrStatus));
1386 return IRQ_RETVAL(handled);
1389 /* This routine is logically part of the interrupt handler, but isolated
1391 static void rhine_tx(struct net_device *dev)
1393 struct rhine_private *rp = netdev_priv(dev);
1394 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1396 spin_lock(&rp->lock);
1398 /* find and cleanup dirty tx descriptors */
1399 while (rp->dirty_tx != rp->cur_tx) {
1400 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1402 printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
1404 if (txstatus & DescOwn)
1406 if (txstatus & 0x8000) {
1408 printk(KERN_DEBUG "%s: Transmit error, "
1409 "Tx status %8.8x.\n",
1410 dev->name, txstatus);
1411 rp->stats.tx_errors++;
1412 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1413 if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1414 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1415 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1416 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1417 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1418 rp->stats.tx_fifo_errors++;
1419 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1420 break; /* Keep the skb - we try again */
1422 /* Transmitter restarted in 'abnormal' handler. */
1424 if (rp->quirks & rqRhineI)
1425 rp->stats.collisions += (txstatus >> 3) & 0x0F;
1427 rp->stats.collisions += txstatus & 0x0F;
1429 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1430 (txstatus >> 3) & 0xF,
1432 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1433 rp->stats.tx_packets++;
1435 /* Free the original skb. */
1436 if (rp->tx_skbuff_dma[entry]) {
1437 pci_unmap_single(rp->pdev,
1438 rp->tx_skbuff_dma[entry],
1439 rp->tx_skbuff[entry]->len,
1442 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1443 rp->tx_skbuff[entry] = NULL;
1444 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1446 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1447 netif_wake_queue(dev);
1449 spin_unlock(&rp->lock);
1452 /* This routine is logically part of the interrupt handler, but isolated
1453 for clarity and better register allocation. */
1454 static void rhine_rx(struct net_device *dev)
1456 struct rhine_private *rp = netdev_priv(dev);
1457 int entry = rp->cur_rx % RX_RING_SIZE;
1458 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1461 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1463 le32_to_cpu(rp->rx_head_desc->rx_status));
1466 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1467 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1468 struct rx_desc *desc = rp->rx_head_desc;
1469 u32 desc_status = le32_to_cpu(desc->rx_status);
1470 int data_size = desc_status >> 16;
1473 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1477 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1478 if ((desc_status & RxWholePkt) != RxWholePkt) {
1479 printk(KERN_WARNING "%s: Oversized Ethernet "
1480 "frame spanned multiple buffers, entry "
1481 "%#x length %d status %8.8x!\n",
1482 dev->name, entry, data_size,
1484 printk(KERN_WARNING "%s: Oversized Ethernet "
1485 "frame %p vs %p.\n", dev->name,
1486 rp->rx_head_desc, &rp->rx_ring[entry]);
1487 rp->stats.rx_length_errors++;
1488 } else if (desc_status & RxErr) {
1489 /* There was an error. */
1491 printk(KERN_DEBUG "rhine_rx() Rx "
1492 "error was %8.8x.\n",
1494 rp->stats.rx_errors++;
1495 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1496 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1497 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1498 if (desc_status & 0x0002) {
1499 /* this can also be updated outside the interrupt handler */
1500 spin_lock(&rp->lock);
1501 rp->stats.rx_crc_errors++;
1502 spin_unlock(&rp->lock);
1506 struct sk_buff *skb;
1507 /* Length should omit the CRC */
1508 int pkt_len = data_size - 4;
1510 /* Check if the packet is long enough to accept without
1511 copying to a minimally-sized skbuff. */
1512 if (pkt_len < rx_copybreak &&
1513 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1515 skb_reserve(skb, 2); /* 16 byte align the IP header */
1516 pci_dma_sync_single_for_cpu(rp->pdev,
1517 rp->rx_skbuff_dma[entry],
1519 PCI_DMA_FROMDEVICE);
1521 eth_copy_and_sum(skb,
1522 rp->rx_skbuff[entry]->data,
1524 skb_put(skb, pkt_len);
1525 pci_dma_sync_single_for_device(rp->pdev,
1526 rp->rx_skbuff_dma[entry],
1528 PCI_DMA_FROMDEVICE);
1530 skb = rp->rx_skbuff[entry];
1532 printk(KERN_ERR "%s: Inconsistent Rx "
1533 "descriptor chain.\n",
1537 rp->rx_skbuff[entry] = NULL;
1538 skb_put(skb, pkt_len);
1539 pci_unmap_single(rp->pdev,
1540 rp->rx_skbuff_dma[entry],
1542 PCI_DMA_FROMDEVICE);
1544 skb->protocol = eth_type_trans(skb, dev);
1546 dev->last_rx = jiffies;
1547 rp->stats.rx_bytes += pkt_len;
1548 rp->stats.rx_packets++;
1550 entry = (++rp->cur_rx) % RX_RING_SIZE;
1551 rp->rx_head_desc = &rp->rx_ring[entry];
1554 /* Refill the Rx ring buffers. */
1555 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1556 struct sk_buff *skb;
1557 entry = rp->dirty_rx % RX_RING_SIZE;
1558 if (rp->rx_skbuff[entry] == NULL) {
1559 skb = dev_alloc_skb(rp->rx_buf_sz);
1560 rp->rx_skbuff[entry] = skb;
1562 break; /* Better luck next round. */
1563 skb->dev = dev; /* Mark as being used by this device. */
1564 rp->rx_skbuff_dma[entry] =
1565 pci_map_single(rp->pdev, skb->data,
1567 PCI_DMA_FROMDEVICE);
1568 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1570 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1575 * Clears the "tally counters" for CRC errors and missed frames(?).
1576 * It has been reported that some chips need a write of 0 to clear
1577 * these, while for others the counters are set to 1 when written to and
1578 * are cleared when read instead. So we clear them both ways ...
1580 static inline void clear_tally_counters(void __iomem *ioaddr)
1582 iowrite32(0, ioaddr + RxMissed);
1583 ioread16(ioaddr + RxCRCErrs);
1584 ioread16(ioaddr + RxMissed);
1587 static void rhine_restart_tx(struct net_device *dev) {
1588 struct rhine_private *rp = netdev_priv(dev);
1589 void __iomem *ioaddr = rp->base;
1590 int entry = rp->dirty_tx % TX_RING_SIZE;
1594 * If new errors occurred, we need to sort them out before doing Tx.
1595 * In that case the ISR will be back here RSN anyway.
1597 intr_status = get_intr_status(dev);
1599 if ((intr_status & IntrTxErrSummary) == 0) {
1601 /* We know better than the chip where it should continue. */
1602 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1603 ioaddr + TxRingPtr);
1605 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1607 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1612 /* This should never happen */
1614 printk(KERN_WARNING "%s: rhine_restart_tx() "
1615 "Another error occurred %8.8x.\n",
1616 dev->name, intr_status);
1621 static void rhine_error(struct net_device *dev, int intr_status)
1623 struct rhine_private *rp = netdev_priv(dev);
1624 void __iomem *ioaddr = rp->base;
1626 spin_lock(&rp->lock);
1628 if (intr_status & IntrLinkChange)
1629 rhine_check_media(dev, 0);
1630 if (intr_status & IntrStatsMax) {
1631 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1632 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1633 clear_tally_counters(ioaddr);
1635 if (intr_status & IntrTxAborted) {
1637 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1638 dev->name, intr_status);
1640 if (intr_status & IntrTxUnderrun) {
1641 if (rp->tx_thresh < 0xE0)
1642 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1644 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1645 "threshold now %2.2x.\n",
1646 dev->name, rp->tx_thresh);
1648 if (intr_status & IntrTxDescRace) {
1650 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1653 if ((intr_status & IntrTxError) &&
1654 (intr_status & (IntrTxAborted |
1655 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1656 if (rp->tx_thresh < 0xE0) {
1657 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1660 printk(KERN_INFO "%s: Unspecified error. Tx "
1661 "threshold now %2.2x.\n",
1662 dev->name, rp->tx_thresh);
1664 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1666 rhine_restart_tx(dev);
1668 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1669 IntrTxError | IntrTxAborted | IntrNormalSummary |
1672 printk(KERN_ERR "%s: Something Wicked happened! "
1673 "%8.8x.\n", dev->name, intr_status);
1676 spin_unlock(&rp->lock);
1679 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1681 struct rhine_private *rp = netdev_priv(dev);
1682 void __iomem *ioaddr = rp->base;
1683 unsigned long flags;
1685 spin_lock_irqsave(&rp->lock, flags);
1686 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1687 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1688 clear_tally_counters(ioaddr);
1689 spin_unlock_irqrestore(&rp->lock, flags);
1694 static void rhine_set_rx_mode(struct net_device *dev)
1696 struct rhine_private *rp = netdev_priv(dev);
1697 void __iomem *ioaddr = rp->base;
1698 u32 mc_filter[2]; /* Multicast hash filter */
1699 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1701 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1702 /* Unconditionally log net taps. */
1703 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1706 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1707 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1708 } else if ((dev->mc_count > multicast_filter_limit)
1709 || (dev->flags & IFF_ALLMULTI)) {
1710 /* Too many to match, or accept all multicasts. */
1711 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1712 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1715 struct dev_mc_list *mclist;
1717 memset(mc_filter, 0, sizeof(mc_filter));
1718 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1719 i++, mclist = mclist->next) {
1720 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1722 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1724 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1725 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1728 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
1731 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1733 struct rhine_private *rp = netdev_priv(dev);
1735 strcpy(info->driver, DRV_NAME);
1736 strcpy(info->version, DRV_VERSION);
1737 strcpy(info->bus_info, pci_name(rp->pdev));
1740 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1742 struct rhine_private *rp = netdev_priv(dev);
1745 spin_lock_irq(&rp->lock);
1746 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1747 spin_unlock_irq(&rp->lock);
1752 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1754 struct rhine_private *rp = netdev_priv(dev);
1757 spin_lock_irq(&rp->lock);
1758 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1759 spin_unlock_irq(&rp->lock);
1764 static int netdev_nway_reset(struct net_device *dev)
1766 struct rhine_private *rp = netdev_priv(dev);
1768 return mii_nway_restart(&rp->mii_if);
1771 static u32 netdev_get_link(struct net_device *dev)
1773 struct rhine_private *rp = netdev_priv(dev);
1775 return mii_link_ok(&rp->mii_if);
1778 static u32 netdev_get_msglevel(struct net_device *dev)
1783 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1788 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1790 struct rhine_private *rp = netdev_priv(dev);
1792 if (!(rp->quirks & rqWOL))
1795 spin_lock_irq(&rp->lock);
1796 wol->supported = WAKE_PHY | WAKE_MAGIC |
1797 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1798 wol->wolopts = rp->wolopts;
1799 spin_unlock_irq(&rp->lock);
1802 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1804 struct rhine_private *rp = netdev_priv(dev);
1805 u32 support = WAKE_PHY | WAKE_MAGIC |
1806 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1808 if (!(rp->quirks & rqWOL))
1811 if (wol->wolopts & ~support)
1814 spin_lock_irq(&rp->lock);
1815 rp->wolopts = wol->wolopts;
1816 spin_unlock_irq(&rp->lock);
1821 static struct ethtool_ops netdev_ethtool_ops = {
1822 .get_drvinfo = netdev_get_drvinfo,
1823 .get_settings = netdev_get_settings,
1824 .set_settings = netdev_set_settings,
1825 .nway_reset = netdev_nway_reset,
1826 .get_link = netdev_get_link,
1827 .get_msglevel = netdev_get_msglevel,
1828 .set_msglevel = netdev_set_msglevel,
1829 .get_wol = rhine_get_wol,
1830 .set_wol = rhine_set_wol,
1831 .get_sg = ethtool_op_get_sg,
1832 .get_tx_csum = ethtool_op_get_tx_csum,
1833 .get_perm_addr = ethtool_op_get_perm_addr,
1836 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1838 struct rhine_private *rp = netdev_priv(dev);
1841 if (!netif_running(dev))
1844 spin_lock_irq(&rp->lock);
1845 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1846 spin_unlock_irq(&rp->lock);
1851 static int rhine_close(struct net_device *dev)
1853 struct rhine_private *rp = netdev_priv(dev);
1854 void __iomem *ioaddr = rp->base;
1856 spin_lock_irq(&rp->lock);
1858 netif_stop_queue(dev);
1861 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1862 "status was %4.4x.\n",
1863 dev->name, ioread16(ioaddr + ChipCmd));
1865 /* Switch to loopback mode to avoid hardware races. */
1866 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1868 /* Disable interrupts by clearing the interrupt mask. */
1869 iowrite16(0x0000, ioaddr + IntrEnable);
1871 /* Stop the chip's Tx and Rx processes. */
1872 iowrite16(CmdStop, ioaddr + ChipCmd);
1874 spin_unlock_irq(&rp->lock);
1876 free_irq(rp->pdev->irq, dev);
1885 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1887 struct net_device *dev = pci_get_drvdata(pdev);
1888 struct rhine_private *rp = netdev_priv(dev);
1890 unregister_netdev(dev);
1892 pci_iounmap(pdev, rp->base);
1893 pci_release_regions(pdev);
1896 pci_disable_device(pdev);
1897 pci_set_drvdata(pdev, NULL);
1900 static void rhine_shutdown (struct pci_dev *pdev)
1902 struct net_device *dev = pci_get_drvdata(pdev);
1903 struct rhine_private *rp = netdev_priv(dev);
1904 void __iomem *ioaddr = rp->base;
1906 if (!(rp->quirks & rqWOL))
1907 return; /* Nothing to do for non-WOL adapters */
1909 rhine_power_init(dev);
1911 /* Make sure we use pattern 0, 1 and not 4, 5 */
1912 if (rp->quirks & rq6patterns)
1913 iowrite8(0x04, ioaddr + 0xA7);
1915 if (rp->wolopts & WAKE_MAGIC) {
1916 iowrite8(WOLmagic, ioaddr + WOLcrSet);
1918 * Turn EEPROM-controlled wake-up back on -- some hardware may
1919 * not cooperate otherwise.
1921 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1924 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1925 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1927 if (rp->wolopts & WAKE_PHY)
1928 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1930 if (rp->wolopts & WAKE_UCAST)
1931 iowrite8(WOLucast, ioaddr + WOLcrSet);
1934 /* Enable legacy WOL (for old motherboards) */
1935 iowrite8(0x01, ioaddr + PwcfgSet);
1936 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1939 /* Hit power state D3 (sleep) */
1940 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1942 /* TODO: Check use of pci_enable_wake() */
1947 static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
1949 struct net_device *dev = pci_get_drvdata(pdev);
1950 struct rhine_private *rp = netdev_priv(dev);
1951 unsigned long flags;
1953 if (!netif_running(dev))
1956 netif_device_detach(dev);
1957 pci_save_state(pdev);
1959 spin_lock_irqsave(&rp->lock, flags);
1960 rhine_shutdown(pdev);
1961 spin_unlock_irqrestore(&rp->lock, flags);
1963 free_irq(dev->irq, dev);
1967 static int rhine_resume(struct pci_dev *pdev)
1969 struct net_device *dev = pci_get_drvdata(pdev);
1970 struct rhine_private *rp = netdev_priv(dev);
1971 unsigned long flags;
1974 if (!netif_running(dev))
1977 if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
1978 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1980 ret = pci_set_power_state(pdev, PCI_D0);
1982 printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
1983 dev->name, ret ? "failed" : "succeeded", ret);
1985 pci_restore_state(pdev);
1987 spin_lock_irqsave(&rp->lock, flags);
1989 enable_mmio(rp->pioaddr, rp->quirks);
1991 rhine_power_init(dev);
1996 init_registers(dev);
1997 spin_unlock_irqrestore(&rp->lock, flags);
1999 netif_device_attach(dev);
2003 #endif /* CONFIG_PM */
2005 static struct pci_driver rhine_driver = {
2007 .id_table = rhine_pci_tbl,
2008 .probe = rhine_init_one,
2009 .remove = __devexit_p(rhine_remove_one),
2011 .suspend = rhine_suspend,
2012 .resume = rhine_resume,
2013 #endif /* CONFIG_PM */
2014 .shutdown = rhine_shutdown,
2018 static int __init rhine_init(void)
2020 /* when a module, this is printed whether or not devices are found in probe */
2024 return pci_module_init(&rhine_driver);
2028 static void __exit rhine_cleanup(void)
2030 pci_unregister_driver(&rhine_driver);
2034 module_init(rhine_init);
2035 module_exit(rhine_cleanup);