/*******************************************************************************


  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"

/* Change Log
 * 6.0.58       4/20/05
 *   o Accepted ethtool cleanup patch from Stephen Hemminger
 * 6.0.44+      2/15/05
 *   o Applied Anton's patch to resolve tx hang in hardware
 *   o Applied Andrew Morton's patch - e1000 stops working after resume
 */

char e1000_driver_name[] = "e1000";
char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
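/* C pastes adjacent string literals, so DRIVERNAPI tacks "-NAPI" onto
 * the version string when CONFIG_E1000_NAPI is enabled */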
#define DRV_VERSION             "6.0.60-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x1000),
        INTEL_E1000_ETHERNET_DEVICE(0x1001),
        INTEL_E1000_ETHERNET_DEVICE(0x1004),
        INTEL_E1000_ETHERNET_DEVICE(0x1008),
        INTEL_E1000_ETHERNET_DEVICE(0x1009),
        INTEL_E1000_ETHERNET_DEVICE(0x100C),
        INTEL_E1000_ETHERNET_DEVICE(0x100D),
        INTEL_E1000_ETHERNET_DEVICE(0x100E),
        INTEL_E1000_ETHERNET_DEVICE(0x100F),
        INTEL_E1000_ETHERNET_DEVICE(0x1010),
        INTEL_E1000_ETHERNET_DEVICE(0x1011),
        INTEL_E1000_ETHERNET_DEVICE(0x1012),
        INTEL_E1000_ETHERNET_DEVICE(0x1013),
        INTEL_E1000_ETHERNET_DEVICE(0x1014),
        INTEL_E1000_ETHERNET_DEVICE(0x1015),
        INTEL_E1000_ETHERNET_DEVICE(0x1016),
        INTEL_E1000_ETHERNET_DEVICE(0x1017),
        INTEL_E1000_ETHERNET_DEVICE(0x1018),
        INTEL_E1000_ETHERNET_DEVICE(0x1019),
        INTEL_E1000_ETHERNET_DEVICE(0x101A),
        INTEL_E1000_ETHERNET_DEVICE(0x101D),
        INTEL_E1000_ETHERNET_DEVICE(0x101E),
        INTEL_E1000_ETHERNET_DEVICE(0x1026),
        INTEL_E1000_ETHERNET_DEVICE(0x1027),
        INTEL_E1000_ETHERNET_DEVICE(0x1028),
        INTEL_E1000_ETHERNET_DEVICE(0x1075),
        INTEL_E1000_ETHERNET_DEVICE(0x1076),
        INTEL_E1000_ETHERNET_DEVICE(0x1077),
        INTEL_E1000_ETHERNET_DEVICE(0x1078),
        INTEL_E1000_ETHERNET_DEVICE(0x1079),
        INTEL_E1000_ETHERNET_DEVICE(0x107A),
        INTEL_E1000_ETHERNET_DEVICE(0x107B),
        INTEL_E1000_ETHERNET_DEVICE(0x107C),
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x108B),
        INTEL_E1000_ETHERNET_DEVICE(0x108C),
        INTEL_E1000_ETHERNET_DEVICE(0x1099),
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
int e1000_setup_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_rx_resources(struct e1000_adapter *adapter);
void e1000_free_tx_resources(struct e1000_adapter *adapter);
void e1000_free_rx_resources(struct e1000_adapter *adapter);
void e1000_update_stats(struct e1000_adapter *adapter);

/* Local Function Prototypes */

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_watchdog_task(struct e1000_adapter *adapter);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct net_device *netdev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                    int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                       int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
void e1000_set_ethtool_ops(struct net_device *netdev);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_tx_timeout_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                              struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

/* Exported from other modules */

extern void e1000_check_options(struct e1000_adapter *adapter);

static struct pci_driver e1000_driver = {
        .name     = e1000_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
        /* Power Management Hooks */
#ifdef CONFIG_PM
        .suspend  = e1000_suspend,
        .resume   = e1000_resume
#endif
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
e1000_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               e1000_driver_string, e1000_driver_version);

        printk(KERN_INFO "%s\n", e1000_copyright);

        ret = pci_module_init(&e1000_driver);

        return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
e1000_exit_module(void)
{
        pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static inline void
e1000_irq_disable(struct e1000_adapter *adapter)
{
        atomic_inc(&adapter->irq_sem);
        E1000_WRITE_REG(&adapter->hw, IMC, ~0);
        E1000_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static inline void
e1000_irq_enable(struct e1000_adapter *adapter)
{
        if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
                E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
                E1000_WRITE_FLUSH(&adapter->hw);
        }
}
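
/**
 * e1000_update_mng_vlan - keep the manageability VLAN registered
 * @adapter: board private structure
 *
 * Registers the VLAN id from the manageability DHCP cookie with the
 * VLAN group when the hardware supports it, and drops the previously
 * registered management VLAN id once it is no longer in use.
 **/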
void
e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        uint16_t vid = adapter->hw.mng_cookie.vlan_id;
        uint16_t old_vid = adapter->mng_vlan_id;
        if(adapter->vlgrp) {
                if(!adapter->vlgrp->vlan_devices[vid]) {
                        if(adapter->hw.mng_cookie.status &
                                E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
                                e1000_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

                        if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
                                        (vid != old_vid) &&
                                        !adapter->vlgrp->vlan_devices[old_vid])
                                e1000_vlan_rx_kill_vid(netdev, old_vid);
                }
        }
}

int
e1000_up(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        /* hardware has been reset, we need to reload some things */

        /* Reset the PHY if it was previously powered down */
        if(adapter->hw.media_type == e1000_media_type_copper) {
                uint16_t mii_reg;
                e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
                if(mii_reg & MII_CR_POWER_DOWN)
                        e1000_phy_reset(&adapter->hw);
        }

        e1000_set_multi(netdev);

        e1000_restore_vlan(adapter);

        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
        adapter->alloc_rx_buf(adapter);

#ifdef CONFIG_PCI_MSI
        if(adapter->hw.mac_type > e1000_82547_rev_2) {
                adapter->have_msi = TRUE;
                if((err = pci_enable_msi(adapter->pdev))) {
                        DPRINTK(PROBE, ERR,
                         "Unable to allocate MSI interrupt Error: %d\n", err);
                        adapter->have_msi = FALSE;
                }
        }
#endif
        if((err = request_irq(adapter->pdev->irq, &e1000_intr,
                              SA_SHIRQ | SA_SAMPLE_RANDOM,
                              netdev->name, netdev))) {
                DPRINTK(PROBE, ERR,
                    "Unable to allocate interrupt Error: %d\n", err);
                return err;
        }

        mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_E1000_NAPI
        netif_poll_enable(netdev);
#endif
        e1000_irq_enable(adapter);

        return 0;
}

void
e1000_down(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        e1000_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
        if(adapter->hw.mac_type > e1000_82547_rev_2 &&
           adapter->have_msi == TRUE)
                pci_disable_msi(adapter->pdev);
#endif
        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

#ifdef CONFIG_E1000_NAPI
        netif_poll_disable(netdev);
#endif
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        e1000_reset(adapter);
        e1000_clean_tx_ring(adapter);
        e1000_clean_rx_ring(adapter);

        /* If WoL is not enabled and management mode is not IAMT,
         * power down the PHY so no link is implied when the
         * interface is down */
        if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper &&
           !e1000_check_mng_mode(&adapter->hw) &&
           !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
                uint16_t mii_reg;
                e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
                mii_reg |= MII_CR_POWER_DOWN;
                e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
                mdelay(1);
        }
}

void
e1000_reset(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        uint32_t pba, manc;
        uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
        uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;

        /* Repartition Pba for greater than 9k mtu
         * To take effect CTRL.RST is required.
         */

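        /* The value written to PBA is the receive packet buffer
         * allocation in KB; the remainder of the on-chip buffer is
         * left for transmit. */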
        switch (adapter->hw.mac_type) {
        case e1000_82547:
        case e1000_82547_rev_2:
                pba = E1000_PBA_30K;
                break;
        case e1000_82571:
        case e1000_82572:
                pba = E1000_PBA_38K;
                break;
        case e1000_82573:
                pba = E1000_PBA_12K;
                break;
        default:
                pba = E1000_PBA_48K;
                break;
        }

        if((adapter->hw.mac_type != e1000_82573) &&
           (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
                pba -= 8; /* allocate more FIFO for Tx */
                /* send an XOFF when there is enough space in the
                 * Rx FIFO to hold one extra full size Rx packet
                 */
                fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
                                        ETHERNET_FCS_SIZE + 1;
                fc_low_water_mark = fc_high_water_mark + 8;
        }

        if(adapter->hw.mac_type == e1000_82547) {
                adapter->tx_fifo_head = 0;
                adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
                adapter->tx_fifo_size =
                        (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
                atomic_set(&adapter->tx_fifo_stall, 0);
        }

        E1000_WRITE_REG(&adapter->hw, PBA, pba);

        /* flow control settings */
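        /* The water marks are byte offsets back from the top of the Rx
         * packet buffer: XOFF is sent when the FIFO fills to within
         * fc_high_water_mark bytes of the top, XON once it drains back
         * past the low water point. */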
        adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
                                    fc_high_water_mark;
        adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
                                   fc_low_water_mark;
        adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
        adapter->hw.fc_send_xon = 1;
        adapter->hw.fc = adapter->hw.original_fc;

        /* Allow time for pending master requests to run */
        e1000_reset_hw(&adapter->hw);
        if(adapter->hw.mac_type >= e1000_82544)
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
        if(e1000_init_hw(&adapter->hw))
                DPRINTK(PROBE, ERR, "Hardware Error\n");
        e1000_update_mng_vlan(adapter);
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

        e1000_reset_adaptive(&adapter->hw);
        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
        if (adapter->en_mng_pt) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
                E1000_WRITE_REG(&adapter->hw, MANC, manc);
        }
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
e1000_probe(struct pci_dev *pdev,
            const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        unsigned long mmio_start, mmio_len;
        uint32_t ctrl_ext;
        uint32_t swsm;

        static int cards_found = 0;
        int i, err, pci_using_dac;
        uint16_t eeprom_data;
        uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
        if((err = pci_enable_device(pdev)))
                return err;

        if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
                pci_using_dac = 1;
        } else {
                if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
                        E1000_ERR("No usable DMA configuration, aborting\n");
                        return err;
                }
                pci_using_dac = 0;
        }

        if((err = pci_request_regions(pdev, e1000_driver_name)))
                return err;

        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
        if(!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_MODULE_OWNER(netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
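        /* (1 << debug) - 1 turns the debug level into a netif message
         * bitmask with the 'debug' lowest message-type bits set */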
        adapter->msg_enable = (1 << debug) - 1;

        mmio_start = pci_resource_start(pdev, BAR_0);
        mmio_len = pci_resource_len(pdev, BAR_0);

        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
        if(!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

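        /* registers are memory-mapped through BAR 0; scan the remaining
         * BARs for the optional I/O-mapped register window */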
        for(i = BAR_1; i <= BAR_5; i++) {
                if(pci_resource_len(pdev, i) == 0)
                        continue;
                if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        adapter->hw.io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

        netdev->open = &e1000_open;
        netdev->stop = &e1000_close;
        netdev->hard_start_xmit = &e1000_xmit_frame;
        netdev->get_stats = &e1000_get_stats;
        netdev->set_multicast_list = &e1000_set_multi;
        netdev->set_mac_address = &e1000_set_mac;
        netdev->change_mtu = &e1000_change_mtu;
        netdev->do_ioctl = &e1000_ioctl;
        e1000_set_ethtool_ops(netdev);
        netdev->tx_timeout = &e1000_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
        netdev->poll = &e1000_clean;
        netdev->weight = 64;
#endif
        netdev->vlan_rx_register = e1000_vlan_rx_register;
        netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
        netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = e1000_netpoll;
#endif
        strcpy(netdev->name, pci_name(pdev));

        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len;
        netdev->base_addr = adapter->hw.io_base;

        adapter->bd_number = cards_found;

        /* setup the private structure */

        if((err = e1000_sw_init(adapter)))
                goto err_sw_init;

        if((err = e1000_check_phy_reset_block(&adapter->hw)))
                DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

        if(adapter->hw.mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
                                   NETIF_F_HW_CSUM |
                                   NETIF_F_HW_VLAN_TX |
                                   NETIF_F_HW_VLAN_RX |
                                   NETIF_F_HW_VLAN_FILTER;
        }

#ifdef NETIF_F_TSO
        if((adapter->hw.mac_type >= e1000_82544) &&
           (adapter->hw.mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;

#ifdef NETIF_F_TSO_IPV6
        if(adapter->hw.mac_type > e1000_82547_rev_2)
                netdev->features |= NETIF_F_TSO_IPV6;
#endif
#endif
        if(pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        /* hard_start_xmit is safe against parallel locking */
        netdev->features |= NETIF_F_LLTX;

        adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);

        /* before reading the EEPROM, reset the controller to
         * put the device in a known good starting state */

        e1000_reset_hw(&adapter->hw);

        /* make sure the EEPROM is good */

        if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
                DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        /* copy the MAC address out of the EEPROM */

        if(e1000_read_mac_addr(&adapter->hw))
                DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);

        if(!is_valid_ether_addr(netdev->perm_addr)) {
                DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        e1000_read_part_num(&adapter->hw, &(adapter->part_num));

        e1000_get_bus_info(&adapter->hw);

        init_timer(&adapter->tx_fifo_stall_timer);
        adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
        adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &e1000_watchdog;
        adapter->watchdog_timer.data = (unsigned long) adapter;

        INIT_WORK(&adapter->watchdog_task,
                (void (*)(void *))e1000_watchdog_task, adapter);

        init_timer(&adapter->phy_info_timer);
        adapter->phy_info_timer.function = &e1000_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;

        INIT_WORK(&adapter->tx_timeout_task,
                (void (*)(void *))e1000_tx_timeout_task, netdev);

        /* we're going to reset, so assume we have no link for now */

        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        e1000_check_options(adapter);

        /* Initial Wake on LAN setting
         * If APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */

        switch(adapter->hw.mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
                break;
        case e1000_82544:
                e1000_read_eeprom(&adapter->hw,
                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_82544_APM;
                break;
        case e1000_82546:
        case e1000_82546_rev_3:
                if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
                   && (adapter->hw.media_type == e1000_media_type_copper)) {
                        e1000_read_eeprom(&adapter->hw,
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
                }
                /* Fall Through */
        default:
                e1000_read_eeprom(&adapter->hw,
                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        }
        if(eeprom_data & eeprom_apme_mask)
                adapter->wol |= E1000_WUFC_MAG;

        /* reset the hardware with the new settings */
        e1000_reset(adapter);

        /* Let firmware know the driver has taken over */
        switch(adapter->hw.mac_type) {
        case e1000_82571:
        case e1000_82572:
                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
                                ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
                break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
                                swsm | E1000_SWSM_DRV_LOAD);
                break;
        default:
                break;
        }

        strcpy(netdev->name, "eth%d");
        if((err = register_netdev(netdev)))
                goto err_register;

        DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

        cards_found++;
        return 0;

err_register:
err_sw_init:
err_eeprom:
        iounmap(adapter->hw.hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
        return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        uint32_t ctrl_ext;
        uint32_t manc, swsm;

        flush_scheduled_work();

        if(adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                if(manc & E1000_MANC_SMBUS_EN) {
                        manc |= E1000_MANC_ARP_EN;
                        E1000_WRITE_REG(&adapter->hw, MANC, manc);
                }
        }

        switch(adapter->hw.mac_type) {
        case e1000_82571:
        case e1000_82572:
                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
                                ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
                break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
                                swsm & ~E1000_SWSM_DRV_LOAD);
                break;

        default:
                break;
        }

        unregister_netdev(netdev);

        if(!e1000_check_phy_reset_block(&adapter->hw))
                e1000_phy_hw_reset(&adapter->hw);

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

        free_netdev(netdev);

        pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;

        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

        adapter->rx_buffer_len = E1000_RXBUFFER_2048;
        adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
        hw->max_frame_size = netdev->mtu +
                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

        /* identify the MAC */

        if(e1000_set_mac_type(hw)) {
                DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
                return -EIO;
        }

        /* initialize eeprom parameters */

        if(e1000_init_eeprom_params(hw)) {
                E1000_ERR("EEPROM initialization failed\n");
                return -EIO;
        }

        switch(hw->mac_type) {
        default:
                break;
        case e1000_82541:
        case e1000_82547:
        case e1000_82541_rev_2:
        case e1000_82547_rev_2:
                hw->phy_init_script = 1;
                break;
        }

        e1000_set_media_type(hw);

        hw->wait_autoneg_complete = FALSE;
        hw->tbi_compatibility_en = TRUE;
        hw->adaptive_ifs = TRUE;

        /* Copper options */

        if(hw->media_type == e1000_media_type_copper) {
                hw->mdix = AUTO_ALL_MODES;
                hw->disable_polarity_correction = FALSE;
                hw->master_slave = E1000_MASTER_SLAVE;
        }

        atomic_set(&adapter->irq_sem, 1);
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tx_lock);

        return 0;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
e1000_open(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev_priv(netdev);
        int err;

        /* allocate transmit descriptors */

        if((err = e1000_setup_tx_resources(adapter)))
                goto err_setup_tx;

        /* allocate receive descriptors */

        if((err = e1000_setup_rx_resources(adapter)))
                goto err_setup_rx;

        if((err = e1000_up(adapter)))
                goto err_up;
        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
        if((adapter->hw.mng_cookie.status &
                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
                e1000_update_mng_vlan(adapter);
        }

        return E1000_SUCCESS;

err_up:
        e1000_free_rx_resources(adapter);
err_setup_rx:
        e1000_free_tx_resources(adapter);
err_setup_tx:
        e1000_reset(adapter);

        return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev_priv(netdev);

        e1000_down(adapter);

        e1000_free_tx_resources(adapter);
        e1000_free_rx_resources(adapter);

        if((adapter->hw.mng_cookie.status &
                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
        }
        return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static inline boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
                      void *start, unsigned long len)
{
        unsigned long begin = (unsigned long) start;
        unsigned long end = begin + len;

        /* First rev 82545 and 82546 must not allow any memory
         * write location to cross a 64k boundary due to errata 23 */
        if (adapter->hw.mac_type == e1000_82545 ||
            adapter->hw.mac_type == e1000_82546) {
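                /* begin and (end - 1) differ above bit 15 only when
                 * the range spans a 64k boundary */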
                return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
        }

        return TRUE;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_tx_resources(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *txdr = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

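        /* buffer_info is CPU-only bookkeeping, so vmalloc is fine; the
         * descriptor ring itself is DMA'd by the NIC and is allocated
         * below with pci_alloc_consistent() */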
        size = sizeof(struct e1000_buffer) * txdr->count;
        txdr->buffer_info = vmalloc(size);
        if(!txdr->buffer_info) {
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the transmit descriptor ring\n");
                return -ENOMEM;
        }
        memset(txdr->buffer_info, 0, size);

        /* round up to nearest 4K */

        txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
        E1000_ROUNDUP(txdr->size, 4096);

        txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
        if(!txdr->desc) {
setup_tx_desc_die:
                vfree(txdr->buffer_info);
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the transmit descriptor ring\n");
                return -ENOMEM;
        }

        /* Fix for errata 23, can't cross 64kB boundary */
        if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
                void *olddesc = txdr->desc;
                dma_addr_t olddma = txdr->dma;
                DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
                                     "at %p\n", txdr->size, txdr->desc);
                /* Try again, without freeing the previous */
                txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
                if(!txdr->desc) {
                /* Failed allocation, critical failure */
                        pci_free_consistent(pdev, txdr->size, olddesc, olddma);
                        goto setup_tx_desc_die;
                }

                if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
                        /* give up */
                        pci_free_consistent(pdev, txdr->size, txdr->desc,
                                            txdr->dma);
                        pci_free_consistent(pdev, txdr->size, olddesc, olddma);
                        DPRINTK(PROBE, ERR,
                                "Unable to allocate aligned memory "
                                "for the transmit descriptor ring\n");
                        vfree(txdr->buffer_info);
                        return -ENOMEM;
                } else {
                        /* Free old allocation, new allocation was successful */
                        pci_free_consistent(pdev, txdr->size, olddesc, olddma);
                }
        }
        memset(txdr->desc, 0, txdr->size);

        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;

        return 0;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
        uint64_t tdba = adapter->tx_ring.dma;
        uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
        uint32_t tctl, tipg;

        E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));

        E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);

        /* Setup the HW Tx Head and Tail descriptor pointers */

        E1000_WRITE_REG(&adapter->hw, TDH, 0);
        E1000_WRITE_REG(&adapter->hw, TDT, 0);

        /* Set the default values for the Tx Inter Packet Gap timer */

        switch (adapter->hw.mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
                tipg = DEFAULT_82542_TIPG_IPGT;
                tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
                if(adapter->hw.media_type == e1000_media_type_fiber ||
                   adapter->hw.media_type == e1000_media_type_internal_serdes)
                        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }
        E1000_WRITE_REG(&adapter->hw, TIPG, tipg);

        /* Set the Tx Interrupt Delay register */

        E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
        if(adapter->hw.mac_type >= e1000_82540)
                E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);

        /* Program the Transmit Control Register */

        tctl = E1000_READ_REG(&adapter->hw, TCTL);

        tctl &= ~E1000_TCTL_CT;
        tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

        E1000_WRITE_REG(&adapter->hw, TCTL, tctl);

        e1000_config_collision_dist(&adapter->hw);

        /* Setup Transmit Descriptor Settings for eop descriptor */
        adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
                E1000_TXD_CMD_IFCS;

        if(adapter->hw.mac_type < e1000_82543)
                adapter->txd_cmd |= E1000_TXD_CMD_RPS;
        else
                adapter->txd_cmd |= E1000_TXD_CMD_RS;

        /* Cache if we're 82544 running in PCI-X because we'll
         * need this to apply a workaround later in the send path. */
        if(adapter->hw.mac_type == e1000_82544 &&
           adapter->hw.bus_type == e1000_bus_type_pcix)
                adapter->pcix_82544 = 1;
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
e1000_setup_rx_resources(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *rxdr = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size, desc_len;

        size = sizeof(struct e1000_buffer) * rxdr->count;
        rxdr->buffer_info = vmalloc(size);
        if(!rxdr->buffer_info) {
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the receive descriptor ring\n");
                return -ENOMEM;
        }
        memset(rxdr->buffer_info, 0, size);

        size = sizeof(struct e1000_ps_page) * rxdr->count;
        rxdr->ps_page = kmalloc(size, GFP_KERNEL);
        if(!rxdr->ps_page) {
                vfree(rxdr->buffer_info);
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the receive descriptor ring\n");
                return -ENOMEM;
        }
        memset(rxdr->ps_page, 0, size);

        size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
        rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
        if(!rxdr->ps_page_dma) {
                vfree(rxdr->buffer_info);
                kfree(rxdr->ps_page);
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the receive descriptor ring\n");
                return -ENOMEM;
        }
        memset(rxdr->ps_page_dma, 0, size);

        if(adapter->hw.mac_type <= e1000_82547_rev_2)
                desc_len = sizeof(struct e1000_rx_desc);
        else
                desc_len = sizeof(union e1000_rx_desc_packet_split);

        /* Round up to nearest 4K */

        rxdr->size = rxdr->count * desc_len;
        E1000_ROUNDUP(rxdr->size, 4096);

        rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

        if(!rxdr->desc) {
setup_rx_desc_die:
                vfree(rxdr->buffer_info);
                kfree(rxdr->ps_page);
                kfree(rxdr->ps_page_dma);
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the receive descriptor ring\n");
                return -ENOMEM;
        }

        /* Fix for errata 23, can't cross 64kB boundary */
        if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
                void *olddesc = rxdr->desc;
                dma_addr_t olddma = rxdr->dma;
                DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
                                     "at %p\n", rxdr->size, rxdr->desc);
                /* Try again, without freeing the previous */
                rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
                if(!rxdr->desc) {
                /* Failed allocation, critical failure */
                        pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
                        goto setup_rx_desc_die;
                }

                if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
                        /* give up */
                        pci_free_consistent(pdev, rxdr->size, rxdr->desc,
                                            rxdr->dma);
                        pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
                        DPRINTK(PROBE, ERR,
                                "Unable to allocate aligned memory "
                                "for the receive descriptor ring\n");
                        vfree(rxdr->buffer_info);
                        kfree(rxdr->ps_page);
                        kfree(rxdr->ps_page_dma);
                        return -ENOMEM;
                } else {
                        /* Free old allocation, new allocation was successful */
                        pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
                }
        }
        memset(rxdr->desc, 0, rxdr->size);

        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;

        return 0;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/

static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
        uint32_t rctl, rfctl;
        uint32_t psrctl = 0;

        rctl = E1000_READ_REG(&adapter->hw, RCTL);

        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

        rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
                E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
                (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

        if(adapter->hw.tbi_compatibility_on == 1)
                rctl |= E1000_RCTL_SBP;
        else
                rctl &= ~E1000_RCTL_SBP;

        if (adapter->netdev->mtu <= ETH_DATA_LEN)
                rctl &= ~E1000_RCTL_LPE;
        else
                rctl |= E1000_RCTL_LPE;

        /* Setup buffer sizes */
        if(adapter->hw.mac_type >= e1000_82571) {
                /* We can now specify buffers in 1K increments.
                 * BSIZE and BSEX are ignored in this case. */
                rctl |= adapter->rx_buffer_len << 0x11;
        } else {
                rctl &= ~E1000_RCTL_SZ_4096;
                rctl |= E1000_RCTL_BSEX;
                switch (adapter->rx_buffer_len) {
                case E1000_RXBUFFER_2048:
                default:
                        rctl |= E1000_RCTL_SZ_2048;
                        rctl &= ~E1000_RCTL_BSEX;
                        break;
                case E1000_RXBUFFER_4096:
                        rctl |= E1000_RCTL_SZ_4096;
                        break;
                case E1000_RXBUFFER_8192:
                        rctl |= E1000_RCTL_SZ_8192;
                        break;
                case E1000_RXBUFFER_16384:
                        rctl |= E1000_RCTL_SZ_16384;
                        break;
                }
        }

#ifdef CONFIG_E1000_PACKET_SPLIT
        /* 82571 and greater support packet-split where the protocol
         * header is placed in skb->data and the packet data is
         * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
         * In the case of a non-split, skb->data is linearly filled,
         * followed by the page buffers.  Therefore, skb->data is
         * sized to hold the largest protocol header.
         */
        adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2)
                          && (adapter->netdev->mtu
                              < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
#endif
        if(adapter->rx_ps) {
                /* Configure extra packet-split registers */
                rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
                rfctl |= E1000_RFCTL_EXTEN;
                /* disable IPv6 packet split support */
                rfctl |= E1000_RFCTL_IPV6_DIS;
                E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

                rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;

                psrctl |= adapter->rx_ps_bsize0 >>
                        E1000_PSRCTL_BSIZE0_SHIFT;
                psrctl |= PAGE_SIZE >>
                        E1000_PSRCTL_BSIZE1_SHIFT;
                psrctl |= PAGE_SIZE <<
                        E1000_PSRCTL_BSIZE2_SHIFT;
                psrctl |= PAGE_SIZE <<
                        E1000_PSRCTL_BSIZE3_SHIFT;

                E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
        }

        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
        uint64_t rdba = adapter->rx_ring.dma;
        uint32_t rdlen, rctl, rxcsum;

        if(adapter->rx_ps) {
                rdlen = adapter->rx_ring.count *
                        sizeof(union e1000_rx_desc_packet_split);
                adapter->clean_rx = e1000_clean_rx_irq_ps;
                adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
        } else {
                rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
                adapter->clean_rx = e1000_clean_rx_irq;
                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
        }

        /* disable receives while setting up the descriptors */
        rctl = E1000_READ_REG(&adapter->hw, RCTL);
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);

        /* set the Receive Delay Timer Register */
        E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);

        if(adapter->hw.mac_type >= e1000_82540) {
                E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
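                /* the ITR register counts in 256ns increments; convert
                 * the interrupts/sec setting with 10^9 / (itr * 256) */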
                if(adapter->itr > 1)
                        E1000_WRITE_REG(&adapter->hw, ITR,
                                1000000000 / (adapter->itr * 256));
        }

        /* Setup the Base and Length of the Rx Descriptor Ring */
        E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));

        E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        E1000_WRITE_REG(&adapter->hw, RDH, 0);
        E1000_WRITE_REG(&adapter->hw, RDT, 0);

        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        if(adapter->hw.mac_type >= e1000_82543) {
                rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
                if(adapter->rx_csum == TRUE) {
                        rxcsum |= E1000_RXCSUM_TUOFL;

                        /* Enable 82571 IPv4 payload checksum for UDP fragments
                         * Must be used in conjunction with packet-split. */
                        if((adapter->hw.mac_type > e1000_82547_rev_2) &&
                           (adapter->rx_ps)) {
                                rxcsum |= E1000_RXCSUM_IPPCSE;
                        }
                } else {
                        rxcsum &= ~E1000_RXCSUM_TUOFL;
                        /* don't need to clear IPPCSE as it defaults to 0 */
                }
                E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
        }

        if (adapter->hw.mac_type == e1000_82573)
                E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);

        /* Enable Receives */
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * e1000_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
e1000_free_tx_resources(struct e1000_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        e1000_clean_tx_ring(adapter);

        vfree(adapter->tx_ring.buffer_info);
        adapter->tx_ring.buffer_info = NULL;

        pci_free_consistent(pdev, adapter->tx_ring.size,
                            adapter->tx_ring.desc, adapter->tx_ring.dma);

        adapter->tx_ring.desc = NULL;
}

static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                        struct e1000_buffer *buffer_info)
{
        if(buffer_info->dma) {
                pci_unmap_page(adapter->pdev,
                                buffer_info->dma,
                                buffer_info->length,
                                PCI_DMA_TODEVICE);
                buffer_info->dma = 0;
        }
        if(buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
}

/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */

        if (likely(adapter->previous_buffer_info.skb != NULL)) {
                e1000_unmap_and_free_tx_resource(adapter,
                                &adapter->previous_buffer_info);
        }

        for(i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        size = sizeof(struct e1000_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        E1000_WRITE_REG(&adapter->hw, TDH, 0);
        E1000_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
e1000_free_rx_resources(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;

        e1000_clean_rx_ring(adapter);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
        kfree(rx_ring->ps_page);
        rx_ring->ps_page = NULL;
        kfree(rx_ring->ps_page_dma);
        rx_ring->ps_page_dma = NULL;

        pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

        rx_ring->desc = NULL;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct e1000_buffer *buffer_info;
        struct e1000_ps_page *ps_page;
        struct e1000_ps_page_dma *ps_page_dma;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i, j;

        /* Free all the Rx ring sk_buffs */

        for(i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if(buffer_info->skb) {
                        ps_page = &rx_ring->ps_page[i];
                        ps_page_dma = &rx_ring->ps_page_dma[i];
                        pci_unmap_single(pdev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         PCI_DMA_FROMDEVICE);

                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;

                        for(j = 0; j < PS_PAGE_BUFFERS; j++) {
                                if(!ps_page->ps_page[j]) break;
                                pci_unmap_single(pdev,
                                                 ps_page_dma->ps_page_dma[j],
                                                 PAGE_SIZE, PCI_DMA_FROMDEVICE);
                                ps_page_dma->ps_page_dma[j] = 0;
                                put_page(ps_page->ps_page[j]);
                                ps_page->ps_page[j] = NULL;
                        }
                }
        }
1542         size = sizeof(struct e1000_buffer) * rx_ring->count;
1543         memset(rx_ring->buffer_info, 0, size);
1544         size = sizeof(struct e1000_ps_page) * rx_ring->count;
1545         memset(rx_ring->ps_page, 0, size);
1546         size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
1547         memset(rx_ring->ps_page_dma, 0, size);
1548
1549         /* Zero out the descriptor ring */
1550
1551         memset(rx_ring->desc, 0, rx_ring->size);
1552
1553         rx_ring->next_to_clean = 0;
1554         rx_ring->next_to_use = 0;
1555
1556         E1000_WRITE_REG(&adapter->hw, RDH, 0);
1557         E1000_WRITE_REG(&adapter->hw, RDT, 0);
1558 }
1559
1560 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
1561  * and memory write and invalidate disabled for certain operations
1562  */
1563 static void
1564 e1000_enter_82542_rst(struct e1000_adapter *adapter)
1565 {
1566         struct net_device *netdev = adapter->netdev;
1567         uint32_t rctl;
1568
1569         e1000_pci_clear_mwi(&adapter->hw);
1570
1571         rctl = E1000_READ_REG(&adapter->hw, RCTL);
1572         rctl |= E1000_RCTL_RST;
1573         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1574         E1000_WRITE_FLUSH(&adapter->hw);
1575         mdelay(5);
1576
1577         if(netif_running(netdev))
1578                 e1000_clean_rx_ring(adapter);
1579 }
1580
1581 static void
1582 e1000_leave_82542_rst(struct e1000_adapter *adapter)
1583 {
1584         struct net_device *netdev = adapter->netdev;
1585         uint32_t rctl;
1586
1587         rctl = E1000_READ_REG(&adapter->hw, RCTL);
1588         rctl &= ~E1000_RCTL_RST;
1589         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1590         E1000_WRITE_FLUSH(&adapter->hw);
1591         mdelay(5);
1592
1593         if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
1594                 e1000_pci_set_mwi(&adapter->hw);
1595
1596         if(netif_running(netdev)) {
1597                 e1000_configure_rx(adapter);
1598                 e1000_alloc_rx_buffers(adapter);
1599         }
1600 }
1601
1602 /**
1603  * e1000_set_mac - Change the Ethernet Address of the NIC
1604  * @netdev: network interface device structure
1605  * @p: pointer to an address structure
1606  *
1607  * Returns 0 on success, negative on failure
1608  **/
1609
1610 static int
1611 e1000_set_mac(struct net_device *netdev, void *p)
1612 {
1613         struct e1000_adapter *adapter = netdev_priv(netdev);
1614         struct sockaddr *addr = p;
1615
1616         if(!is_valid_ether_addr(addr->sa_data))
1617                 return -EADDRNOTAVAIL;
1618
1619         /* 82542 2.0 needs to be in reset to write receive address registers */
1620
1621         if(adapter->hw.mac_type == e1000_82542_rev2_0)
1622                 e1000_enter_82542_rst(adapter);
1623
1624         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1625         memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1626
1627         e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1628
1629         /* With 82571 controllers, LAA may be overwritten (with the default)
1630          * due to controller reset from the other port. */
1631         if (adapter->hw.mac_type == e1000_82571) {
1632                 /* activate the work around */
1633                 adapter->hw.laa_is_present = 1;
1634
1635                 /* Hold a copy of the LAA in RAR[14].  This is done so that
1636                  * between the time RAR[0] gets clobbered and the time it
1637                  * gets fixed (in e1000_watchdog), the actual LAA is in one
1638                  * of the RARs and no incoming packets directed to this port
1639                  * are dropped.  Eventually the LAA will be in both RAR[0]
1640                  * and RAR[14]. */
1641                 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 
1642                                         E1000_RAR_ENTRIES - 1);
1643         }
1644
1645         if(adapter->hw.mac_type == e1000_82542_rev2_0)
1646                 e1000_leave_82542_rst(adapter);
1647
1648         return 0;
1649 }
1650
1651 /**
1652  * e1000_set_multi - Multicast and Promiscuous mode set
1653  * @netdev: network interface device structure
1654  *
1655  * The set_multi entry point is called whenever the multicast address
1656  * list or the network interface flags are updated.  This routine is
1657  * responsible for configuring the hardware for proper multicast,
1658  * promiscuous mode, and all-multi behavior.
1659  **/
1660
1661 static void
1662 e1000_set_multi(struct net_device *netdev)
1663 {
1664         struct e1000_adapter *adapter = netdev_priv(netdev);
1665         struct e1000_hw *hw = &adapter->hw;
1666         struct dev_mc_list *mc_ptr;
1667         unsigned long flags;
1668         uint32_t rctl;
1669         uint32_t hash_value;
1670         int i, rar_entries = E1000_RAR_ENTRIES;
1671
1672         spin_lock_irqsave(&adapter->tx_lock, flags);
1673         /* reserve RAR[14] for LAA over-write work-around */
1674         if (adapter->hw.mac_type == e1000_82571)
1675                 rar_entries--;
1676
1677         /* Check for Promiscuous and All Multicast modes */
1678
1679         rctl = E1000_READ_REG(hw, RCTL);
1680
1681         if(netdev->flags & IFF_PROMISC) {
1682                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1683         } else if(netdev->flags & IFF_ALLMULTI) {
1684                 rctl |= E1000_RCTL_MPE;
1685                 rctl &= ~E1000_RCTL_UPE;
1686         } else {
1687                 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
1688         }
1689
1690         E1000_WRITE_REG(hw, RCTL, rctl);
1691
1692         /* 82542 2.0 needs to be in reset to write receive address registers */
1693
1694         if(hw->mac_type == e1000_82542_rev2_0)
1695                 e1000_enter_82542_rst(adapter);
1696
1697         /* Load the first 14 multicast addresses into the exact filters 1-14.
1698          * RAR 0 is used for the station MAC address.
1699          * If there are fewer than 14 addresses, go ahead and clear the filters.
1700          * -- with 82571 controllers only RAR entries 0-13 are used here,
1701          *    since RAR[14] is reserved for the LAA work-around */
1702         mc_ptr = netdev->mc_list;
1703
1704         for(i = 1; i < rar_entries; i++) {
1705                 if (mc_ptr) {
1706                         e1000_rar_set(hw, mc_ptr->dmi_addr, i);
1707                         mc_ptr = mc_ptr->next;
1708                 } else {
1709                         E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
1710                         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
1711                 }
1712         }
1713
1714         /* clear the old settings from the multicast hash table */
1715
1716         for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
1717                 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
1718
1719         /* load any remaining addresses into the hash table */
1720
1721         for(; mc_ptr; mc_ptr = mc_ptr->next) {
1722                 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
1723                 e1000_mta_set(hw, hash_value);
1724         }
1725
1726         if(hw->mac_type == e1000_82542_rev2_0)
1727                 e1000_leave_82542_rst(adapter);
1728
1729         spin_unlock_irqrestore(&adapter->tx_lock, flags);
1730 }
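/* A rough sketch of the two-tier filter programmed above: addresses placed
 * in the Receive Address Registers are matched exactly, while the overflow
 * goes through the 4096-bit Multicast Table Array.  e1000_hash_mc_addr()
 * derives a 12-bit hash from the address and e1000_mta_set() sets bit
 * (hash & 0x1F) in MTA register (hash >> 5); see e1000_hw.c for the exact
 * scheme.  A hash hit is only a coarse match, so the stack still filters
 * out any false positives. */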
1731
1732 /* Need to wait a few seconds after link up to get diagnostic information from
1733  * the phy */
1734
1735 static void
1736 e1000_update_phy_info(unsigned long data)
1737 {
1738         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1739         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
1740 }
1741
1742 /**
1743  * e1000_82547_tx_fifo_stall - Timer Call-back
1744  * @data: pointer to adapter cast into an unsigned long
1745  **/
1746
1747 static void
1748 e1000_82547_tx_fifo_stall(unsigned long data)
1749 {
1750         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1751         struct net_device *netdev = adapter->netdev;
1752         uint32_t tctl;
1753
1754         if(atomic_read(&adapter->tx_fifo_stall)) {
1755                 if((E1000_READ_REG(&adapter->hw, TDT) ==
1756                     E1000_READ_REG(&adapter->hw, TDH)) &&
1757                    (E1000_READ_REG(&adapter->hw, TDFT) ==
1758                     E1000_READ_REG(&adapter->hw, TDFH)) &&
1759                    (E1000_READ_REG(&adapter->hw, TDFTS) ==
1760                     E1000_READ_REG(&adapter->hw, TDFHS))) {
1761                         tctl = E1000_READ_REG(&adapter->hw, TCTL);
1762                         E1000_WRITE_REG(&adapter->hw, TCTL,
1763                                         tctl & ~E1000_TCTL_EN);
1764                         E1000_WRITE_REG(&adapter->hw, TDFT,
1765                                         adapter->tx_head_addr);
1766                         E1000_WRITE_REG(&adapter->hw, TDFH,
1767                                         adapter->tx_head_addr);
1768                         E1000_WRITE_REG(&adapter->hw, TDFTS,
1769                                         adapter->tx_head_addr);
1770                         E1000_WRITE_REG(&adapter->hw, TDFHS,
1771                                         adapter->tx_head_addr);
1772                         E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1773                         E1000_WRITE_FLUSH(&adapter->hw);
1774
1775                         adapter->tx_fifo_head = 0;
1776                         atomic_set(&adapter->tx_fifo_stall, 0);
1777                         netif_wake_queue(netdev);
1778                 } else {
1779                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
1780                 }
1781         }
1782 }
1783
1784 /**
1785  * e1000_watchdog - Timer Call-back
1786  * @data: pointer to adapter cast into an unsigned long
1787  **/
1788 static void
1789 e1000_watchdog(unsigned long data)
1790 {
1791         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1792
1793         /* Do the rest outside of interrupt context */
1794         schedule_work(&adapter->watchdog_task);
1795 }
1796
1797 static void
1798 e1000_watchdog_task(struct e1000_adapter *adapter)
1799 {
1800         struct net_device *netdev = adapter->netdev;
1801         struct e1000_desc_ring *txdr = &adapter->tx_ring;
1802         uint32_t link;
1803
1804         e1000_check_for_link(&adapter->hw);
1805         if (adapter->hw.mac_type == e1000_82573) {
1806                 e1000_enable_tx_pkt_filtering(&adapter->hw);
1807                 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
1808                         e1000_update_mng_vlan(adapter);
1809         }
1810
1811         if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
1812            !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
1813                 link = !adapter->hw.serdes_link_down;
1814         else
1815                 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
1816
1817         if(link) {
1818                 if(!netif_carrier_ok(netdev)) {
1819                         e1000_get_speed_and_duplex(&adapter->hw,
1820                                                    &adapter->link_speed,
1821                                                    &adapter->link_duplex);
1822
1823                         DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
1824                                adapter->link_speed,
1825                                adapter->link_duplex == FULL_DUPLEX ?
1826                                "Full Duplex" : "Half Duplex");
1827
1828                         netif_carrier_on(netdev);
1829                         netif_wake_queue(netdev);
1830                         mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1831                         adapter->smartspeed = 0;
1832                 }
1833         } else {
1834                 if(netif_carrier_ok(netdev)) {
1835                         adapter->link_speed = 0;
1836                         adapter->link_duplex = 0;
1837                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
1838                         netif_carrier_off(netdev);
1839                         netif_stop_queue(netdev);
1840                         mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1841                 }
1842
1843                 e1000_smartspeed(adapter);
1844         }
1845
1846         e1000_update_stats(adapter);
1847
1848         adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
1849         adapter->tpt_old = adapter->stats.tpt;
1850         adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
1851         adapter->colc_old = adapter->stats.colc;
1852
1853         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
1854         adapter->gorcl_old = adapter->stats.gorcl;
1855         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
1856         adapter->gotcl_old = adapter->stats.gotcl;
1857
1858         e1000_update_adaptive(&adapter->hw);
1859
1860         if(!netif_carrier_ok(netdev)) {
1861                 if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1862                         /* We've lost link, so the controller stops DMA,
1863                          * but we've got queued Tx work that's never going
1864                          * to get done, so reset controller to flush Tx.
1865                          * (Do the reset outside of interrupt context). */
1866                         schedule_work(&adapter->tx_timeout_task);
1867                 }
1868         }
1869
1870         /* Dynamic mode for Interrupt Throttle Rate (ITR) */
1871         if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
1872                 /* Symmetric Tx/Rx gets a reduced ITR=2000; totally
1873                  * asymmetric Tx or Rx gets ITR=8000; everyone
1874                  * else lands between 2000 and 8000. */
1875                 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
1876                 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 
1877                         adapter->gotcl - adapter->gorcl :
1878                         adapter->gorcl - adapter->gotcl) / 10000;
1879                 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
1880                 E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
1881         }
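        /* Worked example of the heuristic above: symmetric traffic
         * (gotcl == gorcl) gives dif == 0, so itr == 2000; fully one-sided
         * traffic (e.g. gorcl == 0) gives dif == goc, so
         * itr == 6000 * goc / goc + 2000 == 8000; mixed loads land in
         * between.  The register itself is again written in 256 ns units
         * via 1000000000 / (itr * 256). */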
1882
1883         /* Cause software interrupt to ensure rx ring is cleaned */
1884         E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
1885
1886         /* Force detection of hung controller every watchdog period */
1887         adapter->detect_tx_hung = TRUE;
1888
1889         /* With 82571 controllers, LAA may be overwritten due to controller 
1890          * reset from the other port. Set the appropriate LAA in RAR[0] */
1891         if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
1892                 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1893
1894         /* Reset the timer */
1895         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1896 }
1897
1898 #define E1000_TX_FLAGS_CSUM             0x00000001
1899 #define E1000_TX_FLAGS_VLAN             0x00000002
1900 #define E1000_TX_FLAGS_TSO              0x00000004
1901 #define E1000_TX_FLAGS_IPV4             0x00000008
1902 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
1903 #define E1000_TX_FLAGS_VLAN_SHIFT       16
1904
1905 static inline int
1906 e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1907 {
1908 #ifdef NETIF_F_TSO
1909         struct e1000_context_desc *context_desc;
1910         unsigned int i;
1911         uint32_t cmd_length = 0;
1912         uint16_t ipcse = 0, tucse, mss;
1913         uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1914         int err;
1915
1916         if(skb_shinfo(skb)->tso_size) {
1917                 if (skb_header_cloned(skb)) {
1918                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1919                         if (err)
1920                                 return err;
1921                 }
1922
1923                 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1924                 mss = skb_shinfo(skb)->tso_size;
1925                 if(skb->protocol == htons(ETH_P_IP)) {
1926                         skb->nh.iph->tot_len = 0;
1927                         skb->nh.iph->check = 0;
1928                         skb->h.th->check =
1929                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
1930                                                    skb->nh.iph->daddr,
1931                                                    0,
1932                                                    IPPROTO_TCP,
1933                                                    0);
1934                         cmd_length = E1000_TXD_CMD_IP;
1935                         ipcse = skb->h.raw - skb->data - 1;
1936 #ifdef NETIF_F_TSO_IPV6
1937                 } else if(skb->protocol == htons(ETH_P_IPV6)) {
1938                         skb->nh.ipv6h->payload_len = 0;
1939                         skb->h.th->check =
1940                                 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
1941                                                  &skb->nh.ipv6h->daddr,
1942                                                  0,
1943                                                  IPPROTO_TCP,
1944                                                  0);
1945                         ipcse = 0;
1946 #endif
1947                 }
1948                 ipcss = skb->nh.raw - skb->data;
1949                 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1950                 tucss = skb->h.raw - skb->data;
1951                 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1952                 tucse = 0;
1953
1954                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
1955                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
1956
1957                 i = adapter->tx_ring.next_to_use;
1958                 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1959
1960                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
1961                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
1962                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
1963                 context_desc->upper_setup.tcp_fields.tucss = tucss;
1964                 context_desc->upper_setup.tcp_fields.tucso = tucso;
1965                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
1966                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
1967                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
1968                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
1969
1970                 if(++i == adapter->tx_ring.count) i = 0;
1971                 adapter->tx_ring.next_to_use = i;
1972
1973                 return 1;
1974         }
1975 #endif
1976
1977         return 0;
1978 }
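/* Worked example of the offsets computed in e1000_tso(), assuming an
 * untagged Ethernet frame with a 20-byte IPv4 header and TCP: ipcss = 14
 * (start of the IP header), ipcso = 24 (the IP checksum field),
 * ipcse = 33 (last byte of the IP header), tucss = 34 (start of TCP),
 * tucso = 50 (the TCP checksum field), and hdr_len = 54 for a plain
 * 20-byte TCP header. */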
1979
1980 static inline boolean_t
1981 e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1982 {
1983         struct e1000_context_desc *context_desc;
1984         unsigned int i;
1985         uint8_t css;
1986
1987         if(likely(skb->ip_summed == CHECKSUM_HW)) {
1988                 css = skb->h.raw - skb->data;
1989
1990                 i = adapter->tx_ring.next_to_use;
1991                 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1992
1993                 context_desc->upper_setup.tcp_fields.tucss = css;
1994                 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
1995                 context_desc->upper_setup.tcp_fields.tucse = 0;
1996                 context_desc->tcp_seg_setup.data = 0;
1997                 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
1998
1999                 if(unlikely(++i == adapter->tx_ring.count)) i = 0;
2000                 adapter->tx_ring.next_to_use = i;
2001
2002                 return TRUE;
2003         }
2004
2005         return FALSE;
2006 }
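/* Note on tucso above: with CHECKSUM_HW in this kernel generation,
 * skb->csum carries the offset of the checksum field relative to
 * skb->h.raw, so css + skb->csum lands on the field the hardware must
 * stuff (css + 16 for TCP, css + 6 for UDP). */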
2007
2008 #define E1000_MAX_TXD_PWR       12
2009 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2010
2011 static inline int
2012 e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
2013         unsigned int first, unsigned int max_per_txd,
2014         unsigned int nr_frags, unsigned int mss)
2015 {
2016         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2017         struct e1000_buffer *buffer_info;
2018         unsigned int len = skb->len;
2019         unsigned int offset = 0, size, count = 0, i;
2020         unsigned int f;
2021         len -= skb->data_len;
2022
2023         i = tx_ring->next_to_use;
2024
2025         while(len) {
2026                 buffer_info = &tx_ring->buffer_info[i];
2027                 size = min(len, max_per_txd);
2028 #ifdef NETIF_F_TSO
2029                 /* Workaround for premature desc write-backs
2030                  * in TSO mode.  Append 4-byte sentinel desc */
2031                 if(unlikely(mss && !nr_frags && size == len && size > 8))
2032                         size -= 4;
2033 #endif
2034                 /* Work-around for erratum 10, which applies to
2035                  * all controllers in PCI-X mode.
2036                  * The fix is to make sure that the first descriptor of a
2037                  * packet is smaller than 2048 - 16 - 16 (i.e. 2016) bytes.
2038                  */
2039                 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2040                                 (size > 2015) && count == 0))
2041                         size = 2015;
2042
2043                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2044                  * terminating buffers within evenly-aligned dwords. */
2045                 if(unlikely(adapter->pcix_82544 &&
2046                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2047                    size > 4))
2048                         size -= 4;
2049
2050                 buffer_info->length = size;
2051                 buffer_info->dma =
2052                         pci_map_single(adapter->pdev,
2053                                 skb->data + offset,
2054                                 size,
2055                                 PCI_DMA_TODEVICE);
2056                 buffer_info->time_stamp = jiffies;
2057
2058                 len -= size;
2059                 offset += size;
2060                 count++;
2061                 if(unlikely(++i == tx_ring->count)) i = 0;
2062         }
2063
2064         for(f = 0; f < nr_frags; f++) {
2065                 struct skb_frag_struct *frag;
2066
2067                 frag = &skb_shinfo(skb)->frags[f];
2068                 len = frag->size;
2069                 offset = frag->page_offset;
2070
2071                 while(len) {
2072                         buffer_info = &tx_ring->buffer_info[i];
2073                         size = min(len, max_per_txd);
2074 #ifdef NETIF_F_TSO
2075                         /* Workaround for premature desc write-backs
2076                          * in TSO mode.  Append 4-byte sentinel desc */
2077                         if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2078                                 size -= 4;
2079 #endif
2080                         /* Workaround for potential 82544 hang in PCI-X.
2081                          * Avoid terminating buffers within evenly-aligned
2082                          * dwords. */
2083                         if(unlikely(adapter->pcix_82544 &&
2084                            !((unsigned long)(frag->page+offset+size-1) & 4) &&
2085                            size > 4))
2086                                 size -= 4;
2087
2088                         buffer_info->length = size;
2089                         buffer_info->dma =
2090                                 pci_map_page(adapter->pdev,
2091                                         frag->page,
2092                                         offset,
2093                                         size,
2094                                         PCI_DMA_TODEVICE);
2095                         buffer_info->time_stamp = jiffies;
2096
2097                         len -= size;
2098                         offset += size;
2099                         count++;
2100                         if(unlikely(++i == tx_ring->count)) i = 0;
2101                 }
2102         }
2103
2104         i = (i == 0) ? tx_ring->count - 1 : i - 1;
2105         tx_ring->buffer_info[i].skb = skb;
2106         tx_ring->buffer_info[first].next_to_watch = i;
2107
2108         return count;
2109 }
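/* Worked example of the 82544 work-around in the loops above: a buffer
 * whose last byte would sit at ...0x08 has bit 2 of that address clear,
 * so 4 bytes are trimmed and the buffer now ends at ...0x04; the trimmed
 * bytes simply become the start of the next descriptor on the following
 * loop pass. */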
2110
2111 static inline void
2112 e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
2113 {
2114         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2115         struct e1000_tx_desc *tx_desc = NULL;
2116         struct e1000_buffer *buffer_info;
2117         uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2118         unsigned int i;
2119
2120         if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2121                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2122                              E1000_TXD_CMD_TSE;
2123                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2124
2125                 if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
2126                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2127         }
2128
2129         if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2130                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2131                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2132         }
2133
2134         if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2135                 txd_lower |= E1000_TXD_CMD_VLE;
2136                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2137         }
2138
2139         i = tx_ring->next_to_use;
2140
2141         while(count--) {
2142                 buffer_info = &tx_ring->buffer_info[i];
2143                 tx_desc = E1000_TX_DESC(*tx_ring, i);
2144                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2145                 tx_desc->lower.data =
2146                         cpu_to_le32(txd_lower | buffer_info->length);
2147                 tx_desc->upper.data = cpu_to_le32(txd_upper);
2148                 if(unlikely(++i == tx_ring->count)) i = 0;
2149         }
2150
2151         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2152
2153         /* Force memory writes to complete before letting h/w
2154          * know there are new descriptors to fetch.  (Only
2155          * applicable for weak-ordered memory model archs,
2156          * such as IA-64). */
2157         wmb();
2158
2159         tx_ring->next_to_use = i;
2160         E1000_WRITE_REG(&adapter->hw, TDT, i);
2161 }
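/* The TDT write above is, in effect, the producer doorbell: descriptors
 * between the hardware head (TDH) and the new tail belong to the NIC once
 * the write lands, which is why the wmb() must order the descriptor
 * stores ahead of the MMIO tail update. */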
2162
2163 /**
2164  * 82547 workaround to avoid controller hang in half-duplex environment.
2165  * The workaround is to avoid queuing a large packet that would span
2166  * the internal Tx FIFO ring boundary by notifying the stack to resend
2167  * the packet at a later time.  This gives the Tx FIFO an opportunity to
2168  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
2169  * to the beginning of the Tx FIFO.
2170  **/
2171
2172 #define E1000_FIFO_HDR                  0x10
2173 #define E1000_82547_PAD_LEN             0x3E0
2174
2175 static inline int
2176 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2177 {
2178         uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2179         uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
2180
2181         E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
2182
2183         if(adapter->link_duplex != HALF_DUPLEX)
2184                 goto no_fifo_stall_required;
2185
2186         if(atomic_read(&adapter->tx_fifo_stall))
2187                 return 1;
2188
2189         if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2190                 atomic_set(&adapter->tx_fifo_stall, 1);
2191                 return 1;
2192         }
2193
2194 no_fifo_stall_required:
2195         adapter->tx_fifo_head += skb_fifo_len;
2196         if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
2197                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2198         return 0;
2199 }
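/* Illustrative numbers for the test above: a 60-byte frame occupies
 * 60 + E1000_FIFO_HDR = 76 bytes, rounded up to 80 (E1000_ROUNDUP to the
 * 16-byte FIFO granularity).  The stall condition
 * skb_fifo_len >= E1000_82547_PAD_LEN + fifo_space flags frames that
 * could span the FIFO wrap-around point described in the comment block
 * above. */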
2200
2201 #define MINIMUM_DHCP_PACKET_SIZE 282
2202 static inline int
2203 e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2204 {
2205         struct e1000_hw *hw =  &adapter->hw;
2206         uint16_t length, offset;
2207         if(vlan_tx_tag_present(skb)) {
2208                 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2209                         ( adapter->hw.mng_cookie.status &
2210                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2211                         return 0;
2212         }
2213         if(htons(ETH_P_IP) == skb->protocol) {
2214                 const struct iphdr *ip = skb->nh.iph;
2215                 if(IPPROTO_UDP == ip->protocol) {
2216                         struct udphdr *udp = (struct udphdr *)(skb->h.uh);
2217                         if(ntohs(udp->dest) == 67) {
2218                                 offset = (uint8_t *)udp + 8 - skb->data;
2219                                 length = skb->len - offset;
2220
2221                                 return e1000_mng_write_dhcp_info(hw,
2222                                                 (uint8_t *)udp + 8, length);
2223                         }
2224                 }
2225         } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2226                 struct ethhdr *eth = (struct ethhdr *) skb->data;
2227                 if((htons(ETH_P_IP) == eth->h_proto)) {
2228                         const struct iphdr *ip = 
2229                                 (struct iphdr *)((uint8_t *)skb->data+14);
2230                         if(IPPROTO_UDP == ip->protocol) {
2231                                 struct udphdr *udp = 
2232                                         (struct udphdr *)((uint8_t *)ip + 
2233                                                 (ip->ihl << 2));
2234                                 if(ntohs(udp->dest) == 67) {
2235                                         offset = (uint8_t *)udp + 8 - skb->data;
2236                                         length = skb->len - offset;
2237
2238                                         return e1000_mng_write_dhcp_info(hw,
2239                                                         (uint8_t *)udp + 8, 
2240                                                         length);
2241                                 }
2242                         }
2243                 }
2244         }
2245         return 0;
2246 }
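/* For reference: udp->dest == 67 is the BOOTP/DHCP server port, and the
 * "+ 8" skips the fixed 8-byte UDP header in both branches, so what is
 * handed to e1000_mng_write_dhcp_info() is the DHCP payload itself. */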
2247
2248 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
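/* TXD_USE_COUNT deliberately over-counts: for S bytes with at most 2^X
 * bytes per descriptor it yields (S >> X) + 1, e.g. 4096 bytes with
 * X == 12 counts as 2 descriptors even though 1 would do, which keeps the
 * ring-space check in e1000_xmit_frame() conservative. */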
2249 static int
2250 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2251 {
2252         struct e1000_adapter *adapter = netdev_priv(netdev);
2253         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
2254         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
2255         unsigned int tx_flags = 0;
2256         unsigned int len = skb->len;
2257         unsigned long flags;
2258         unsigned int nr_frags = 0;
2259         unsigned int mss = 0;
2260         int count = 0;
2261         int tso;
2262         unsigned int f;
2263         len -= skb->data_len;
2264
2265         if(unlikely(skb->len <= 0)) {
2266                 dev_kfree_skb_any(skb);
2267                 return NETDEV_TX_OK;
2268         }
2269
2270 #ifdef NETIF_F_TSO
2271         mss = skb_shinfo(skb)->tso_size;
2272         /* The controller does a simple calculation to
2273          * make sure there is enough room in the FIFO before
2274          * initiating the DMA for each buffer.  It assumes
2275          * ceil(buffer len/mss) <= 4, so cap each buffer at
2276          * 4*mss; to make sure we don't overrun the FIFO,
2277          * adjust the max buffer len if mss drops. */
2278         if(mss) {
2279                 max_per_txd = min(mss << 2, max_per_txd);
2280                 max_txd_pwr = fls(max_per_txd) - 1;
2281         }
2282
2283         if((mss) || (skb->ip_summed == CHECKSUM_HW))
2284                 count++;
2285         count++;
2286 #else
2287         if(skb->ip_summed == CHECKSUM_HW)
2288                 count++;
2289 #endif
2290         count += TXD_USE_COUNT(len, max_txd_pwr);
2291
2292         if(adapter->pcix_82544)
2293                 count++;
2294
2295         /* Work-around for erratum 10, which applies to all controllers
2296          * in PCI-X mode: add one more descriptor to the count.
2297          */
2298         if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2299                         (len > 2015)))
2300                 count++;
2301
2302         nr_frags = skb_shinfo(skb)->nr_frags;
2303         for(f = 0; f < nr_frags; f++)
2304                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2305                                        max_txd_pwr);
2306         if(adapter->pcix_82544)
2307                 count += nr_frags;
2308
2309         local_irq_save(flags); 
2310         if (!spin_trylock(&adapter->tx_lock)) { 
2311                 /* Collision - tell upper layer to requeue */ 
2312                 local_irq_restore(flags); 
2313                 return NETDEV_TX_LOCKED; 
2314         } 
2315 #ifdef NETIF_F_TSO
2316         /* TSO Workaround for 82571/2 Controllers -- if skb->data
2317          * points to just header, pull a few bytes of payload from 
2318          * frags into skb->data */
2319         if (skb_shinfo(skb)->tso_size) {
2320                 uint8_t hdr_len;
2321                 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2322                 if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) && 
2323                         (adapter->hw.mac_type == e1000_82571 ||
2324                         adapter->hw.mac_type == e1000_82572)) {
2325                         unsigned int pull_size;
2326                         pull_size = min((unsigned int)4, skb->data_len);
2327                         if (!__pskb_pull_tail(skb, pull_size)) {
2328                                 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2329                                 dev_kfree_skb_any(skb);
2330                                 /* must not return with tx_lock held */
                                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
                                 return NETDEV_TX_OK;
2331                         }
2332                 }
2333         }
2334 #endif
2335
2336         if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573))
2337                 e1000_transfer_dhcp_info(adapter, skb);
2338
2339
2340         /* need: count + 2 desc gap to keep tail from touching
2341          * head, otherwise try next time */
2342         if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
2343                 netif_stop_queue(netdev);
2344                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2345                 return NETDEV_TX_BUSY;
2346         }
2347
2348         if(unlikely(adapter->hw.mac_type == e1000_82547)) {
2349                 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2350                         netif_stop_queue(netdev);
2351                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2352                         spin_unlock_irqrestore(&adapter->tx_lock, flags);
2353                         return NETDEV_TX_BUSY;
2354                 }
2355         }
2356
2357         if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2358                 tx_flags |= E1000_TX_FLAGS_VLAN;
2359                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2360         }
2361
2362         first = adapter->tx_ring.next_to_use;
2363         
2364         tso = e1000_tso(adapter, skb);
2365         if (tso < 0) {
2366                 dev_kfree_skb_any(skb);
2367                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2368                 return NETDEV_TX_OK;
2369         }
2370
2371         if (likely(tso))
2372                 tx_flags |= E1000_TX_FLAGS_TSO;
2373         else if(likely(e1000_tx_csum(adapter, skb)))
2374                 tx_flags |= E1000_TX_FLAGS_CSUM;
2375
2376         /* The old method was to assume an IPv4 packet by default if TSO
2377          * was enabled.  82571 hardware supports TSO for IPv6 as well, so
2378          * we can no longer assume; we must check the protocol. */
2379         if(likely(skb->protocol == htons(ETH_P_IP)))
2380                 tx_flags |= E1000_TX_FLAGS_IPV4;
2381
2382         e1000_tx_queue(adapter,
2383                 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
2384                 tx_flags);
2385
2386         netdev->trans_start = jiffies;
2387
2388         /* Make sure there is space in the ring for the next send. */
2389         if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
2390                 netif_stop_queue(netdev);
2391
2392         spin_unlock_irqrestore(&adapter->tx_lock, flags);
2393         return NETDEV_TX_OK;
2394 }
2395
2396 /**
2397  * e1000_tx_timeout - Respond to a Tx Hang
2398  * @netdev: network interface device structure
2399  **/
2400
2401 static void
2402 e1000_tx_timeout(struct net_device *netdev)
2403 {
2404         struct e1000_adapter *adapter = netdev_priv(netdev);
2405
2406         /* Do the reset outside of interrupt context */
2407         schedule_work(&adapter->tx_timeout_task);
2408 }
2409
2410 static void
2411 e1000_tx_timeout_task(struct net_device *netdev)
2412 {
2413         struct e1000_adapter *adapter = netdev_priv(netdev);
2414
2415         e1000_down(adapter);
2416         e1000_up(adapter);
2417 }
2418
2419 /**
2420  * e1000_get_stats - Get System Network Statistics
2421  * @netdev: network interface device structure
2422  *
2423  * Returns the address of the device statistics structure.
2424  * The statistics are actually updated from the timer callback.
2425  **/
2426
2427 static struct net_device_stats *
2428 e1000_get_stats(struct net_device *netdev)
2429 {
2430         struct e1000_adapter *adapter = netdev_priv(netdev);
2431
2432         e1000_update_stats(adapter);
2433         return &adapter->net_stats;
2434 }
2435
2436 /**
2437  * e1000_change_mtu - Change the Maximum Transfer Unit
2438  * @netdev: network interface device structure
2439  * @new_mtu: new value for maximum frame size
2440  *
2441  * Returns 0 on success, negative on failure
2442  **/
2443
2444 static int
2445 e1000_change_mtu(struct net_device *netdev, int new_mtu)
2446 {
2447         struct e1000_adapter *adapter = netdev_priv(netdev);
2448         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
2449
2450         if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
2451                 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2452                         DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
2453                         return -EINVAL;
2454         }
2455
2456 #define MAX_STD_JUMBO_FRAME_SIZE 9234
2457         /* might want this to be a bigger enum check... */
2458         /* 82571 controllers support jumbo frames up to 10500 bytes, but the driver caps them at MAX_STD_JUMBO_FRAME_SIZE (a 9216-byte MTU) */
2459         if ((adapter->hw.mac_type == e1000_82571 || 
2460              adapter->hw.mac_type == e1000_82572) &&
2461             max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2462                 DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
2463                                     "on 82571 and 82572 controllers.\n");
2464                 return -EINVAL;
2465         }
2466
2467         if(adapter->hw.mac_type == e1000_82573 &&
2468             max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2469                 DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2470                                     "on 82573\n");
2471                 return -EINVAL;
2472         }
2473
2474         if(adapter->hw.mac_type > e1000_82547_rev_2) {
2475                 adapter->rx_buffer_len = max_frame;
2476                 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
2477         } else {
2478                 if(unlikely((adapter->hw.mac_type < e1000_82543) &&
2479                    (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
2480                         DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2481                                             "on 82542\n");
2482                         return -EINVAL;
2483
2484                 } else {
2485                         if(max_frame <= E1000_RXBUFFER_2048) {
2486                                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2487                         } else if(max_frame <= E1000_RXBUFFER_4096) {
2488                                 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
2489                         } else if(max_frame <= E1000_RXBUFFER_8192) {
2490                                 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
2491                         } else if(max_frame <= E1000_RXBUFFER_16384) {
2492                                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
2493                         }
2494                 }
2495         }
2496
2497         netdev->mtu = new_mtu;
2498
2499         if(netif_running(netdev)) {
2500                 e1000_down(adapter);
2501                 e1000_up(adapter);
2502         }
2503
2504         adapter->hw.max_frame_size = max_frame;
2505
2506         return 0;
2507 }
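/* Example: new_mtu == 1500 gives max_frame == 1500 + 14 + 4 == 1518.  On
 * MACs newer than 82547_rev_2 that is rounded up to a 1024-byte multiple
 * (2048); on the older MACs it falls into the E1000_RXBUFFER_2048 bucket,
 * so either way a standard MTU ends up with 2 KB receive buffers. */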
2508
2509 /**
2510  * e1000_update_stats - Update the board statistics counters
2511  * @adapter: board private structure
2512  **/
2513
2514 void
2515 e1000_update_stats(struct e1000_adapter *adapter)
2516 {
2517         struct e1000_hw *hw = &adapter->hw;
2518         unsigned long flags;
2519         uint16_t phy_tmp;
2520
2521 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
2522
2523         spin_lock_irqsave(&adapter->stats_lock, flags);
2524
2525         /* these counters are modified from e1000_adjust_tbi_stats,
2526          * called from the interrupt context, so they must only
2527          * be written while holding adapter->stats_lock
2528          */
2529
2530         adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
2531         adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
2532         adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
2533         adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
2534         adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
2535         adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
2536         adapter->stats.roc += E1000_READ_REG(hw, ROC);
2537         adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
2538         adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
2539         adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
2540         adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
2541         adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
2542         adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
2543
2544         adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
2545         adapter->stats.mpc += E1000_READ_REG(hw, MPC);
2546         adapter->stats.scc += E1000_READ_REG(hw, SCC);
2547         adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
2548         adapter->stats.mcc += E1000_READ_REG(hw, MCC);
2549         adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
2550         adapter->stats.dc += E1000_READ_REG(hw, DC);
2551         adapter->stats.sec += E1000_READ_REG(hw, SEC);
2552         adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
2553         adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
2554         adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
2555         adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
2556         adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
2557         adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
2558         adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
2559         adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
2560         adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
2561         adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
2562         adapter->stats.ruc += E1000_READ_REG(hw, RUC);
2563         adapter->stats.rfc += E1000_READ_REG(hw, RFC);
2564         adapter->stats.rjc += E1000_READ_REG(hw, RJC);
2565         adapter->stats.torl += E1000_READ_REG(hw, TORL);
2566         adapter->stats.torh += E1000_READ_REG(hw, TORH);
2567         adapter->stats.totl += E1000_READ_REG(hw, TOTL);
2568         adapter->stats.toth += E1000_READ_REG(hw, TOTH);
2569         adapter->stats.tpr += E1000_READ_REG(hw, TPR);
2570         adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
2571         adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
2572         adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
2573         adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
2574         adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
2575         adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
2576         adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
2577         adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
2578
2579         /* used for adaptive IFS */
2580
2581         hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
2582         adapter->stats.tpt += hw->tx_packet_delta;
2583         hw->collision_delta = E1000_READ_REG(hw, COLC);
2584         adapter->stats.colc += hw->collision_delta;
2585
2586         if(hw->mac_type >= e1000_82543) {
2587                 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
2588                 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
2589                 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
2590                 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
2591                 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
2592                 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
2593         }
2594         if(hw->mac_type > e1000_82547_rev_2) {
2595                 adapter->stats.iac += E1000_READ_REG(hw, IAC);
2596                 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
2597                 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
2598                 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
2599                 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
2600                 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
2601                 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
2602                 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
2603                 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
2604         }
2605
2606         /* Fill out the OS statistics structure */
2607
2608         adapter->net_stats.rx_packets = adapter->stats.gprc;
2609         adapter->net_stats.tx_packets = adapter->stats.gptc;
2610         adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2611         adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2612         adapter->net_stats.multicast = adapter->stats.mprc;
2613         adapter->net_stats.collisions = adapter->stats.colc;
2614
2615         /* Rx Errors */
2616
2617         adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2618                 adapter->stats.crcerrs + adapter->stats.algnerrc +
2619                 adapter->stats.rlec + adapter->stats.mpc + 
2620                 adapter->stats.cexterr;
2621         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2622         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2623         adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2624         adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
2625         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2626
2627         /* Tx Errors */
2628
2629         adapter->net_stats.tx_errors = adapter->stats.ecol +
2630                                        adapter->stats.latecol;
2631         adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2632         adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2633         adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2634
2635         /* Tx Dropped needs to be maintained elsewhere */
2636
2637         /* Phy Stats */
2638
2639         if(hw->media_type == e1000_media_type_copper) {
2640                 if((adapter->link_speed == SPEED_1000) &&
2641                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
2642                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2643                         adapter->phy_stats.idle_errors += phy_tmp;
2644                 }
2645
2646                 if((hw->mac_type <= e1000_82546) &&
2647                    (hw->phy_type == e1000_phy_m88) &&
2648                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
2649                         adapter->phy_stats.receive_errors += phy_tmp;
2650         }
2651
2652         spin_unlock_irqrestore(&adapter->stats_lock, flags);
2653 }
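/* Note on the register reads above: the hardware statistics registers are
 * clear-on-read, which is why each one is accumulated into adapter->stats
 * with += rather than assigned.  The xxxL/xxxH pairs (GORCL/GORCH,
 * TORL/TORH, ...) are the low and high halves of 64-bit octet counters. */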
2654
2655 /**
2656  * e1000_intr - Interrupt Handler
2657  * @irq: interrupt number
2658  * @data: pointer to a network interface device structure
2659  * @pt_regs: CPU registers structure
2660  **/
2661
2662 static irqreturn_t
2663 e1000_intr(int irq, void *data, struct pt_regs *regs)
2664 {
2665         struct net_device *netdev = data;
2666         struct e1000_adapter *adapter = netdev_priv(netdev);
2667         struct e1000_hw *hw = &adapter->hw;
2668         uint32_t icr = E1000_READ_REG(hw, ICR);
2669 #ifndef CONFIG_E1000_NAPI
2670         unsigned int i;
2671 #endif
2672
2673         if(unlikely(!icr))
2674                 return IRQ_NONE;  /* Not our interrupt */
2675
2676         if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
2677                 hw->get_link_status = 1;
2678                 mod_timer(&adapter->watchdog_timer, jiffies);
2679         }
2680
2681 #ifdef CONFIG_E1000_NAPI
2682         if(likely(netif_rx_schedule_prep(netdev))) {
2683
2684                 /* Disable interrupts and register for poll.  The flush
2685                  * of the posted write is intentionally left out.
2686                  */
2687
2688                 atomic_inc(&adapter->irq_sem);
2689                 E1000_WRITE_REG(hw, IMC, ~0);
2690                 __netif_rx_schedule(netdev);
2691         }
2692 #else
2693         /* Writing IMC and IMS is needed for the 82547.
2694            If the Hub Link bus is occupied, an interrupt
2695            de-assertion message cannot be sent.
2696            When an interrupt assertion message is generated later,
2697            the two messages are re-ordered and sent out.
2698            That causes the APIC to think the 82547 is in de-assertion
2699            state, while the 82547 is actually in assertion state,
2700            resulting in deadlock.  Writing IMC forces the 82547 into
2701            de-assertion state.
2702         */
2703         if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
2704                 atomic_inc(&adapter->irq_sem);
2705                 E1000_WRITE_REG(hw, IMC, ~0);
2706         }
2707
2708         for(i = 0; i < E1000_MAX_INTR; i++)
2709                 if(unlikely(!adapter->clean_rx(adapter) &
2710                    !e1000_clean_tx_irq(adapter)))
2711                         break;
2712
2713         if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
2714                 e1000_irq_enable(adapter);
2715 #endif
2716
2717         return IRQ_HANDLED;
2718 }
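/* Sketch of the masking protocol used above: adapter->irq_sem counts
 * nested disables, so the atomic_inc() paired with each IMC write is
 * undone by e1000_irq_enable(), which rewrites IMS to re-arm interrupts
 * only once the count drops back to zero. */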
2719
2720 #ifdef CONFIG_E1000_NAPI
2721 /**
2722  * e1000_clean - NAPI Rx polling callback
2723  * @adapter: board private structure
2724  **/
2725
2726 static int
2727 e1000_clean(struct net_device *netdev, int *budget)
2728 {
2729         struct e1000_adapter *adapter = netdev_priv(netdev);
2730         int work_to_do = min(*budget, netdev->quota);
2731         int tx_cleaned;
2732         int work_done = 0;
2733
2734         tx_cleaned = e1000_clean_tx_irq(adapter);
2735         adapter->clean_rx(adapter, &work_done, work_to_do);
2736
2737         *budget -= work_done;
2738         netdev->quota -= work_done;
2739         
2740         if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
2741                 /* If no Tx and not enough Rx work done, exit the polling mode */
2742                 netif_rx_complete(netdev);
2743                 e1000_irq_enable(adapter);
2744                 return 0;
2745         }
2746
2747         return 1;
2748 }
2749
2750 #endif
2751 /**
2752  * e1000_clean_tx_irq - Reclaim resources after transmit completes
2753  * @adapter: board private structure
2754  **/
2755
2756 static boolean_t
2757 e1000_clean_tx_irq(struct e1000_adapter *adapter)
2758 {
2759         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2760         struct net_device *netdev = adapter->netdev;
2761         struct e1000_tx_desc *tx_desc, *eop_desc;
2762         struct e1000_buffer *buffer_info;
2763         unsigned int i, eop;
2764         boolean_t cleaned = FALSE;
2765
2766         i = tx_ring->next_to_clean;
2767         eop = tx_ring->buffer_info[i].next_to_watch;
2768         eop_desc = E1000_TX_DESC(*tx_ring, eop);
2769
2770         while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2771                 /* Due to the premature Tx descriptor writeback work-around,
2772                  * free and unmap the stashed previous_buffer_info first */
2773                 if (likely(adapter->previous_buffer_info.skb != NULL)) {
2774                         e1000_unmap_and_free_tx_resource(adapter,
2775                                         &adapter->previous_buffer_info);
2776                 }
2777
2778                 for(cleaned = FALSE; !cleaned; ) {
2779                         tx_desc = E1000_TX_DESC(*tx_ring, i);
2780                         buffer_info = &tx_ring->buffer_info[i];
2781                         cleaned = (i == eop);
2782
2783 #ifdef NETIF_F_TSO
2784                         if (!(netdev->features & NETIF_F_TSO)) {
2785 #endif
2786                                 e1000_unmap_and_free_tx_resource(adapter,
2787                                                                  buffer_info);
2788 #ifdef NETIF_F_TSO
2789                         } else {
2790                                 if (cleaned) {
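                                                /* Defer freeing the EOP
                                                 * buffer by one pass; with
                                                 * TSO the hardware can write
                                                 * the descriptor back before
                                                 * it is done with the buffer
                                                 * (premature writeback). */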
2791                                         memcpy(&adapter->previous_buffer_info,
2792                                                buffer_info,
2793                                                sizeof(struct e1000_buffer));
2794                                         memset(buffer_info, 0,
2795                                                sizeof(struct e1000_buffer));
2796                                 } else {
2797                                         e1000_unmap_and_free_tx_resource(
2798                                             adapter, buffer_info);
2799                                 }
2800                         }
2801 #endif
2802
2803                         tx_desc->buffer_addr = 0;
2804                         tx_desc->lower.data = 0;
2805                         tx_desc->upper.data = 0;
2806
2807                         if(unlikely(++i == tx_ring->count)) i = 0;
2808                 }
2809                 
2810                 eop = tx_ring->buffer_info[i].next_to_watch;
2811                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2812         }
2813
2814         tx_ring->next_to_clean = i;
2815
2816         spin_lock(&adapter->tx_lock);
2817
2818         if(unlikely(cleaned && netif_queue_stopped(netdev) &&
2819                     netif_carrier_ok(netdev)))
2820                 netif_wake_queue(netdev);
2821
2822         spin_unlock(&adapter->tx_lock);
2823         if(adapter->detect_tx_hung) {
2824
2825                 /* Detect a transmit hang in hardware; this serializes the
2826                  * check with the clearing of time_stamp and movement of i */
2827                 adapter->detect_tx_hung = FALSE;
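                     /* Heuristic: a buffer still mapped more than a second
                      * after its time_stamp, while the Tx unit is not paused
                      * by flow control (TXOFF clear), indicates a hang. */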
2828                 if (tx_ring->buffer_info[i].dma &&
2829                     time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
2830                     && !(E1000_READ_REG(&adapter->hw, STATUS) &
2831                         E1000_STATUS_TXOFF)) {
2832
2833                         /* detected Tx unit hang */
2834                         i = tx_ring->next_to_clean;
2835                         eop = tx_ring->buffer_info[i].next_to_watch;
2836                         eop_desc = E1000_TX_DESC(*tx_ring, eop);
2837                         DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
2838                                         "  TDH                  <%x>\n"
2839                                         "  TDT                  <%x>\n"
2840                                         "  next_to_use          <%x>\n"
2841                                         "  next_to_clean        <%x>\n"
2842                                         "buffer_info[next_to_clean]\n"
2843                                         "  dma                  <%llx>\n"
2844                                         "  time_stamp           <%lx>\n"
2845                                         "  next_to_watch        <%x>\n"
2846                                         "  jiffies              <%lx>\n"
2847                                         "  next_to_watch.status <%x>\n",
2848                                 E1000_READ_REG(&adapter->hw, TDH),
2849                                 E1000_READ_REG(&adapter->hw, TDT),
2850                                 tx_ring->next_to_use,
2851                                 i,
2852                                 (unsigned long long)tx_ring->buffer_info[i].dma,
2853                                 tx_ring->buffer_info[i].time_stamp,
2854                                 eop,
2855                                 jiffies,
2856                                 eop_desc->upper.fields.status);
2857                         netif_stop_queue(netdev);
2858                 }
2859         }
2860 #ifdef NETIF_F_TSO
2861
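             /* If the deferred buffer has waited over a second without a new
              * writeback releasing it, it is likely stale; free it now. */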
2862         if(unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
2863             time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
2864                 e1000_unmap_and_free_tx_resource(
2865                     adapter, &adapter->previous_buffer_info);
2866
2867 #endif
2868         return cleaned;
2869 }
2870
2871 /**
2872  * e1000_rx_checksum - Receive Checksum Offload for 82543
2873  * @adapter:     board private structure
2874  * @status_err:  receive descriptor status and error fields
2875  * @csum:        receive descriptor csum field
2876  * @sk_buff:     socket buffer with received data
2877  **/
2878
2879 static inline void
2880 e1000_rx_checksum(struct e1000_adapter *adapter,
2881                   uint32_t status_err, uint32_t csum,
2882                   struct sk_buff *skb)
2883 {
2884         uint16_t status = (uint16_t)status_err;
2885         uint8_t errors = (uint8_t)(status_err >> 24);
2886         skb->ip_summed = CHECKSUM_NONE;
2887
2888         /* 82543 or newer only */
2889         if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
2890         /* Ignore Checksum bit is set */
2891         if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
2892         /* TCP/UDP checksum error bit is set */
2893         if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
2894                 /* let the stack verify checksum errors */
2895                 adapter->hw_csum_err++;
2896                 return;
2897         }
2898         /* TCP/UDP Checksum has not been calculated */
2899         if(adapter->hw.mac_type <= e1000_82547_rev_2) {
2900                 if(!(status & E1000_RXD_STAT_TCPCS))
2901                         return;
2902         } else {
2903                 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
2904                         return;
2905         }
2906         /* It must be a TCP or UDP packet with a valid checksum */
2907         if (likely(status & E1000_RXD_STAT_TCPCS)) {
2908                 /* TCP checksum is good */
2909                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2910         } else if (adapter->hw.mac_type > e1000_82547_rev_2) {
2911                 /* IP fragment with UDP payload */
2912                 /* Hardware complements the payload checksum, so we undo it
2913                  * and then put the value in host order for further stack use.
2914                  */
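                     /* CHECKSUM_HW tells the stack that skb->csum already
                      * holds a hardware-computed checksum it can use for
                      * verification, so no software checksum pass is run. */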
2915                 csum = ntohl(csum ^ 0xFFFF);
2916                 skb->csum = csum;
2917                 skb->ip_summed = CHECKSUM_HW;
2918         }
2919         adapter->hw_csum_good++;
2920 }
2921
2922 /**
2923  * e1000_clean_rx_irq - Send received data up the network stack; legacy
2924  * @adapter: board private structure
2925  **/
2926
2927 static boolean_t
2928 #ifdef CONFIG_E1000_NAPI
2929 e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
2930                    int work_to_do)
2931 #else
2932 e1000_clean_rx_irq(struct e1000_adapter *adapter)
2933 #endif
2934 {
2935         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2936         struct net_device *netdev = adapter->netdev;
2937         struct pci_dev *pdev = adapter->pdev;
2938         struct e1000_rx_desc *rx_desc;
2939         struct e1000_buffer *buffer_info;
2940         struct sk_buff *skb;
2941         unsigned long flags;
2942         uint32_t length;
2943         uint8_t last_byte;
2944         unsigned int i;
2945         boolean_t cleaned = FALSE;
2946
2947         i = rx_ring->next_to_clean;
2948         rx_desc = E1000_RX_DESC(*rx_ring, i);
2949
2950         while(rx_desc->status & E1000_RXD_STAT_DD) {
2951                 buffer_info = &rx_ring->buffer_info[i];
2952 #ifdef CONFIG_E1000_NAPI
2953                 if(*work_done >= work_to_do)
2954                         break;
2955                 (*work_done)++;
2956 #endif
2957                 cleaned = TRUE;
2958
2959                 pci_unmap_single(pdev,
2960                                  buffer_info->dma,
2961                                  buffer_info->length,
2962                                  PCI_DMA_FROMDEVICE);
2963
2964                 skb = buffer_info->skb;
2965                 length = le16_to_cpu(rx_desc->length);
2966
2967                 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
2968                         /* All receives must fit into a single buffer */
2969                         E1000_DBG("%s: Receive packet consumed multiple"
2970                                   " buffers\n", netdev->name);
2971                         dev_kfree_skb_irq(skb);
2972                         goto next_desc;
2973                 }
2974
2975                 if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
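                             /* TBI workaround (likely covering the
                              * carrier-extension errata on fiber parts):
                              * frames TBI_ACCEPT deems good are kept, with
                              * the last byte stripped below. */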
2976                         last_byte = *(skb->data + length - 1);
2977                         if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
2978                                       rx_desc->errors, length, last_byte)) {
2979                                 spin_lock_irqsave(&adapter->stats_lock, flags);
2980                                 e1000_tbi_adjust_stats(&adapter->hw,
2981                                                        &adapter->stats,
2982                                                        length, skb->data);
2983                                 spin_unlock_irqrestore(&adapter->stats_lock,
2984                                                        flags);
2985                                 length--;
2986                         } else {
2987                                 dev_kfree_skb_irq(skb);
2988                                 goto next_desc;
2989                         }
2990                 }
2991
2992                 /* Good Receive */
2993                 skb_put(skb, length - ETHERNET_FCS_SIZE);
2994
2995                 /* Receive Checksum Offload */
2996                 e1000_rx_checksum(adapter,
2997                                   (uint32_t)(rx_desc->status) |
2998                                   ((uint32_t)(rx_desc->errors) << 24),
2999                                   rx_desc->csum, skb);
3000                 skb->protocol = eth_type_trans(skb, netdev);
3001 #ifdef CONFIG_E1000_NAPI
3002                 if(unlikely(adapter->vlgrp &&
3003                             (rx_desc->status & E1000_RXD_STAT_VP))) {
3004                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3005                                                  le16_to_cpu(rx_desc->special) &
3006                                                  E1000_RXD_SPC_VLAN_MASK);
3007                 } else {
3008                         netif_receive_skb(skb);
3009                 }
3010 #else /* CONFIG_E1000_NAPI */
3011                 if(unlikely(adapter->vlgrp &&
3012                             (rx_desc->status & E1000_RXD_STAT_VP))) {
3013                         vlan_hwaccel_rx(skb, adapter->vlgrp,
3014                                         le16_to_cpu(rx_desc->special) &
3015                                         E1000_RXD_SPC_VLAN_MASK);
3016                 } else {
3017                         netif_rx(skb);
3018                 }
3019 #endif /* CONFIG_E1000_NAPI */
3020                 netdev->last_rx = jiffies;
3021
3022 next_desc:
3023                 rx_desc->status = 0;
3024                 buffer_info->skb = NULL;
3025                 if(unlikely(++i == rx_ring->count)) i = 0;
3026
3027                 rx_desc = E1000_RX_DESC(*rx_ring, i);
3028         }
3029         rx_ring->next_to_clean = i;
3030         adapter->alloc_rx_buf(adapter);
3031
3032         return cleaned;
3033 }
3034
3035 /**
3036  * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
3037  * @adapter: board private structure
3038  **/
3039
3040 static boolean_t
3041 #ifdef CONFIG_E1000_NAPI
3042 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done,
3043                       int work_to_do)
3044 #else
3045 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
3046 #endif
3047 {
3048         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3049         union e1000_rx_desc_packet_split *rx_desc;
3050         struct net_device *netdev = adapter->netdev;
3051         struct pci_dev *pdev = adapter->pdev;
3052         struct e1000_buffer *buffer_info;
3053         struct e1000_ps_page *ps_page;
3054         struct e1000_ps_page_dma *ps_page_dma;
3055         struct sk_buff *skb;
3056         unsigned int i, j;
3057         uint32_t length, staterr;
3058         boolean_t cleaned = FALSE;
3059
3060         i = rx_ring->next_to_clean;
3061         rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3062         staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3063
3064         while(staterr & E1000_RXD_STAT_DD) {
3065                 buffer_info = &rx_ring->buffer_info[i];
3066                 ps_page = &rx_ring->ps_page[i];
3067                 ps_page_dma = &rx_ring->ps_page_dma[i];
3068 #ifdef CONFIG_E1000_NAPI
3069                 if(unlikely(*work_done >= work_to_do))
3070                         break;
3071                 (*work_done)++;
3072 #endif
3073                 cleaned = TRUE;
3074                 pci_unmap_single(pdev, buffer_info->dma,
3075                                  buffer_info->length,
3076                                  PCI_DMA_FROMDEVICE);
3077
3078                 skb = buffer_info->skb;
3079
3080                 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3081                         E1000_DBG("%s: Packet Split buffers didn't pick up"
3082                                   " the full packet\n", netdev->name);
3083                         dev_kfree_skb_irq(skb);
3084                         goto next_desc;
3085                 }
3086
3087                 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3088                         dev_kfree_skb_irq(skb);
3089                         goto next_desc;
3090                 }
3091
3092                 length = le16_to_cpu(rx_desc->wb.middle.length0);
3093
3094                 if(unlikely(!length)) {
3095                         E1000_DBG("%s: Last part of the packet spanning"
3096                                   " multiple descriptors\n", netdev->name);
3097                         dev_kfree_skb_irq(skb);
3098                         goto next_desc;
3099                 }
3100
3101                 /* Good Receive */
3102                 skb_put(skb, length);
3103
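                     /* Attach the remaining hardware buffers as page
                      * fragments; a zero length marks the first unused
                      * buffer. */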
3104                 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
3105                         if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3106                                 break;
3107
3108                         pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
3109                                         PAGE_SIZE, PCI_DMA_FROMDEVICE);
3110                         ps_page_dma->ps_page_dma[j] = 0;
3111                         skb_shinfo(skb)->frags[j].page =
3112                                 ps_page->ps_page[j];
3113                         ps_page->ps_page[j] = NULL;
3114                         skb_shinfo(skb)->frags[j].page_offset = 0;
3115                         skb_shinfo(skb)->frags[j].size = length;
3116                         skb_shinfo(skb)->nr_frags++;
3117                         skb->len += length;
3118                         skb->data_len += length;
3119                 }
3120
3121                 e1000_rx_checksum(adapter, staterr,
3122                                   rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3123                 skb->protocol = eth_type_trans(skb, netdev);
3124
3125 #ifdef HAVE_RX_ZERO_COPY
3126                 if(likely(rx_desc->wb.upper.header_status &
3127                           E1000_RXDPS_HDRSTAT_HDRSP))
3128                         skb_shinfo(skb)->zero_copy = TRUE;
3129 #endif
3130 #ifdef CONFIG_E1000_NAPI
3131                 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3132                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3133                                 le16_to_cpu(rx_desc->wb.middle.vlan) &
3134                                 E1000_RXD_SPC_VLAN_MASK);
3135                 } else {
3136                         netif_receive_skb(skb);
3137                 }
3138 #else /* CONFIG_E1000_NAPI */
3139                 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3140                         vlan_hwaccel_rx(skb, adapter->vlgrp,
3141                                 le16_to_cpu(rx_desc->wb.middle.vlan) &
3142                                 E1000_RXD_SPC_VLAN_MASK);
3143                 } else {
3144                         netif_rx(skb);
3145                 }
3146 #endif /* CONFIG_E1000_NAPI */
3147                 netdev->last_rx = jiffies;
3148
3149 next_desc:
3150                 rx_desc->wb.middle.status_error &= ~0xFF;
3151                 buffer_info->skb = NULL;
3152                 if(unlikely(++i == rx_ring->count)) i = 0;
3153
3154                 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3155                 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3156         }
3157         rx_ring->next_to_clean = i;
3158         adapter->alloc_rx_buf(adapter);
3159
3160         return cleaned;
3161 }
3162
3163 /**
3164  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
3165  * @adapter: address of board private structure
3166  **/
3167
3168 static void
3169 e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
3170 {
3171         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3172         struct net_device *netdev = adapter->netdev;
3173         struct pci_dev *pdev = adapter->pdev;
3174         struct e1000_rx_desc *rx_desc;
3175         struct e1000_buffer *buffer_info;
3176         struct sk_buff *skb;
3177         unsigned int i;
3178         unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
3179
3180         i = rx_ring->next_to_use;
3181         buffer_info = &rx_ring->buffer_info[i];
3182
3183         while(!buffer_info->skb) {
3184                 skb = dev_alloc_skb(bufsz);
3185
3186                 if(unlikely(!skb)) {
3187                         /* Better luck next round */
3188                         break;
3189                 }
3190
3191                 /* Fix for errata 23, can't cross 64kB boundary */
3192                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
3193                         struct sk_buff *oldskb = skb;
3194                         DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
3195                                              "at %p\n", bufsz, skb->data);
3196                         /* Try again, without freeing the previous */
3197                         skb = dev_alloc_skb(bufsz);
3198                         /* Failed allocation, critical failure */
3199                         if (!skb) {
3200                                 dev_kfree_skb(oldskb);
3201                                 break;
3202                         }
3203
3204                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
3205                                 /* give up */
3206                                 dev_kfree_skb(skb);
3207                                 dev_kfree_skb(oldskb);
3208                                 break; /* while !buffer_info->skb */
3209                         } else {
3210                                 /* Use new allocation */
3211                                 dev_kfree_skb(oldskb);
3212                         }
3213                 }
3214                 /* Make buffer alignment 2 beyond a 16 byte boundary
3215                  * so that the IP header will be 16 byte aligned after
3216                  * the 14 byte MAC header is removed
3217                  */
3218                 skb_reserve(skb, NET_IP_ALIGN);
3219
3220                 skb->dev = netdev;
3221
3222                 buffer_info->skb = skb;
3223                 buffer_info->length = adapter->rx_buffer_len;
3224                 buffer_info->dma = pci_map_single(pdev,
3225                                                   skb->data,
3226                                                   adapter->rx_buffer_len,
3227                                                   PCI_DMA_FROMDEVICE);
3228
3229                 /* Fix for errata 23, can't cross 64kB boundary */
3230                 if (!e1000_check_64k_bound(adapter,
3231                                         (void *)(unsigned long)buffer_info->dma,
3232                                         adapter->rx_buffer_len)) {
3233                         DPRINTK(RX_ERR, ERR,
3234                                 "dma align check failed: %u bytes at %p\n",
3235                                 adapter->rx_buffer_len,
3236                                 (void *)(unsigned long)buffer_info->dma);
3237                         /* unmap the DMA buffer before freeing the skb
3238                          * that backs it */
3239                         pci_unmap_single(pdev, buffer_info->dma,
3240                                          adapter->rx_buffer_len,
3241                                          PCI_DMA_FROMDEVICE);
3242                         dev_kfree_skb(skb);
3243                         buffer_info->skb = NULL;
3244                         break; /* while !buffer_info->skb */
3245                 }
3246                 rx_desc = E1000_RX_DESC(*rx_ring, i);
3247                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3248
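                     /* True only when i is a multiple of
                      * E1000_RX_BUFFER_WRITE (assuming it is a power of
                      * two), so the tail register is written once per batch
                      * of descriptors instead of once per buffer. */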
3249                 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
3250                         /* Force memory writes to complete before letting h/w
3251                          * know there are new descriptors to fetch.  (Only
3252                          * applicable for weak-ordered memory model archs,
3253                          * such as IA-64). */
3254                         wmb();
3255                         E1000_WRITE_REG(&adapter->hw, RDT, i);
3256                 }
3257
3258                 if(unlikely(++i == rx_ring->count)) i = 0;
3259                 buffer_info = &rx_ring->buffer_info[i];
3260         }
3261
3262         rx_ring->next_to_use = i;
3263 }
3264
3265 /**
3266  * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
3267  * @adapter: address of board private structure
3268  **/
3269
3270 static void
3271 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
3272 {
3273         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3274         struct net_device *netdev = adapter->netdev;
3275         struct pci_dev *pdev = adapter->pdev;
3276         union e1000_rx_desc_packet_split *rx_desc;
3277         struct e1000_buffer *buffer_info;
3278         struct e1000_ps_page *ps_page;
3279         struct e1000_ps_page_dma *ps_page_dma;
3280         struct sk_buff *skb;
3281         unsigned int i, j;
3282
3283         i = rx_ring->next_to_use;
3284         buffer_info = &rx_ring->buffer_info[i];
3285         ps_page = &rx_ring->ps_page[i];
3286         ps_page_dma = &rx_ring->ps_page_dma[i];
3287
3288         while(!buffer_info->skb) {
3289                 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3290
3291                 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
3292                         if(unlikely(!ps_page->ps_page[j])) {
3293                                 ps_page->ps_page[j] =
3294                                         alloc_page(GFP_ATOMIC);
3295                                 if(unlikely(!ps_page->ps_page[j]))
3296                                         goto no_buffers;
3297                                 ps_page_dma->ps_page_dma[j] =
3298                                         pci_map_page(pdev,
3299                                                      ps_page->ps_page[j],
3300                                                      0, PAGE_SIZE,
3301                                                      PCI_DMA_FROMDEVICE);
3302                         }
3303                         /* Refresh the desc even if buffer_addrs didn't
3304                          * change because each write-back erases this info.
3305                          */
3306                         rx_desc->read.buffer_addr[j+1] =
3307                                 cpu_to_le64(ps_page_dma->ps_page_dma[j]);
3308                 }
3309
3310                 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
3311
3312                 if(unlikely(!skb))
3313                         break;
3314
3315                 /* Make buffer alignment 2 beyond a 16 byte boundary
3316                  * so that the IP header will be 16 byte aligned after
3317                  * the 14 byte MAC header is removed
3318                  */
3319                 skb_reserve(skb, NET_IP_ALIGN);
3320
3321                 skb->dev = netdev;
3322
3323                 buffer_info->skb = skb;
3324                 buffer_info->length = adapter->rx_ps_bsize0;
3325                 buffer_info->dma = pci_map_single(pdev, skb->data,
3326                                                   adapter->rx_ps_bsize0,
3327                                                   PCI_DMA_FROMDEVICE);
3328
3329                 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
3330
3331                 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
3332                         /* Force memory writes to complete before letting h/w
3333                          * know there are new descriptors to fetch.  (Only
3334                          * applicable for weak-ordered memory model archs,
3335                          * such as IA-64). */
3336                         wmb();
3337                         /* Hardware increments by 16 bytes, but packet split
3338                          * descriptors are 32 bytes...so we increment tail
3339                          * twice as much.
3340                          */
3341                         E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
3342                 }
3343
3344                 if(unlikely(++i == rx_ring->count)) i = 0;
3345                 buffer_info = &rx_ring->buffer_info[i];
3346                 ps_page = &rx_ring->ps_page[i];
3347                 ps_page_dma = &rx_ring->ps_page_dma[i];
3348         }
3349
3350 no_buffers:
3351         rx_ring->next_to_use = i;
3352 }
3353
3354 /**
3355  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
3356  * @adapter: board private structure
3357  **/
3358
3359 static void
3360 e1000_smartspeed(struct e1000_adapter *adapter)
3361 {
3362         uint16_t phy_status;
3363         uint16_t phy_ctrl;
3364
3365         if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
3366            !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
3367                 return;
3368
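             /* adapter->smartspeed appears to pace the workaround across
              * watchdog runs: at 0, two consecutive master/slave faults
              * clear the manual master/slave setting; at
              * E1000_SMARTSPEED_DOWNSHIFT it is re-enabled; the counter
              * wraps after E1000_SMARTSPEED_MAX. */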
3369         if(adapter->smartspeed == 0) {
3370                 /* If the Master/Slave config fault is asserted on two
3371                  * consecutive reads, assume back-to-back faults */
3372                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
3373                 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
3374                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
3375                 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
3376                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
3377                 if(phy_ctrl & CR_1000T_MS_ENABLE) {
3378                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
3379                         e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
3380                                             phy_ctrl);
3381                         adapter->smartspeed++;
3382                         if(!e1000_phy_setup_autoneg(&adapter->hw) &&
3383                            !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
3384                                                &phy_ctrl)) {
3385                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
3386                                              MII_CR_RESTART_AUTO_NEG);
3387                                 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
3388                                                     phy_ctrl);
3389                         }
3390                 }
3391                 return;
3392         } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
3393                 /* If still no link, perhaps using 2/3 pair cable */
3394                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
3395                 phy_ctrl |= CR_1000T_MS_ENABLE;
3396                 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
3397                 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
3398                    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
3399                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
3400                                      MII_CR_RESTART_AUTO_NEG);
3401                         e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
3402                 }
3403         }
3404         /* Restart process after E1000_SMARTSPEED_MAX iterations */
3405         if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
3406                 adapter->smartspeed = 0;
3407 }
3408
3409 /**
3410  * e1000_ioctl - handle device-specific ioctl requests
3411  * @netdev: network interface device structure
3412  * @ifr: pointer to the interface request data
3413  * @cmd: ioctl command number
3414  **/
3415
3416 static int
3417 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3418 {
3419         switch (cmd) {
3420         case SIOCGMIIPHY:
3421         case SIOCGMIIREG:
3422         case SIOCSMIIREG:
3423                 return e1000_mii_ioctl(netdev, ifr, cmd);
3424         default:
3425                 return -EOPNOTSUPP;
3426         }
3427 }
3428
3429 /**
3430  * e1000_mii_ioctl - handle MII (PHY register) ioctl requests
3431  * @netdev: network interface device structure
3432  * @ifr: pointer to the interface request data
3433  * @cmd: ioctl command number
3434  **/
3435
3436 static int
3437 e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3438 {
3439         struct e1000_adapter *adapter = netdev_priv(netdev);
3440         struct mii_ioctl_data *data = if_mii(ifr);
3441         int retval;
3442         uint16_t mii_reg;
3443         uint16_t spddplx;
3444         unsigned long flags;
3445
3446         if(adapter->hw.media_type != e1000_media_type_copper)
3447                 return -EOPNOTSUPP;
3448
3449         switch (cmd) {
3450         case SIOCGMIIPHY:
3451                 data->phy_id = adapter->hw.phy_addr;
3452                 break;
3453         case SIOCGMIIREG:
3454                 if(!capable(CAP_NET_ADMIN))
3455                         return -EPERM;
3456                 spin_lock_irqsave(&adapter->stats_lock, flags);
3457                 if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
3458                                    &data->val_out)) {
3459                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3460                         return -EIO;
3461                 }
3462                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3463                 break;
3464         case SIOCSMIIREG:
3465                 if(!capable(CAP_NET_ADMIN))
3466                         return -EPERM;
3467                 if(data->reg_num & ~(0x1F))
3468                         return -EFAULT;
3469                 mii_reg = data->val_in;
3470                 spin_lock_irqsave(&adapter->stats_lock, flags);
3471                 if(e1000_write_phy_reg(&adapter->hw, data->reg_num,
3472                                         mii_reg)) {
3473                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3474                         return -EIO;
3475                 }
3476                 if(adapter->hw.phy_type == e1000_phy_m88) {
3477                         switch (data->reg_num) {
3478                         case PHY_CTRL:
3479                                 if(mii_reg & MII_CR_POWER_DOWN)
3480                                         break;
3481                                 if(mii_reg & MII_CR_AUTO_NEG_EN) {
3482                                         adapter->hw.autoneg = 1;
3483                                         adapter->hw.autoneg_advertised = 0x2F;
3484                                 } else {
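                                                /* Decode forced speed/duplex
                                                 * from the MII control bits:
                                                 * 0x40 is the speed-select
                                                 * MSB (1000), 0x2000 the
                                                 * LSB (100), 0x100 full
                                                 * duplex. */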
3485                                         if (mii_reg & 0x40)
3486                                                 spddplx = SPEED_1000;
3487                                         else if (mii_reg & 0x2000)
3488                                                 spddplx = SPEED_100;
3489                                         else
3490                                                 spddplx = SPEED_10;
3491                                         spddplx += (mii_reg & 0x100)
3492                                                    ? FULL_DUPLEX :
3493                                                    HALF_DUPLEX;
3494                                         retval = e1000_set_spd_dplx(adapter,
3495                                                                     spddplx);
3496                                         if(retval) {
3497                                                 spin_unlock_irqrestore(
3498                                                         &adapter->stats_lock, 
3499                                                         flags);
3500                                                 return retval;
3501                                         }
3502                                 }
3503                                 if(netif_running(adapter->netdev)) {
3504                                         e1000_down(adapter);
3505                                         e1000_up(adapter);
3506                                 } else
3507                                         e1000_reset(adapter);
3508                                 break;
3509                         case M88E1000_PHY_SPEC_CTRL:
3510                         case M88E1000_EXT_PHY_SPEC_CTRL:
3511                                 if(e1000_phy_reset(&adapter->hw)) {
3512                                         spin_unlock_irqrestore(
3513                                                 &adapter->stats_lock, flags);
3514                                         return -EIO;
3515                                 }
3516                                 break;
3517                         }
3518                 } else {
3519                         switch (data->reg_num) {
3520                         case PHY_CTRL:
3521                                 if(mii_reg & MII_CR_POWER_DOWN)
3522                                         break;
3523                                 if(netif_running(adapter->netdev)) {
3524                                         e1000_down(adapter);
3525                                         e1000_up(adapter);
3526                                 } else
3527                                         e1000_reset(adapter);
3528                                 break;
3529                         }
3530                 }
3531                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3532                 break;
3533         default:
3534                 return -EOPNOTSUPP;
3535         }
3536         return E1000_SUCCESS;
3537 }
3538
3539 void
3540 e1000_pci_set_mwi(struct e1000_hw *hw)
3541 {
3542         struct e1000_adapter *adapter = hw->back;
3543         int ret_val = pci_set_mwi(adapter->pdev);
3544
3545         if(ret_val)
3546                 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
3547 }
3548
3549 void
3550 e1000_pci_clear_mwi(struct e1000_hw *hw)
3551 {
3552         struct e1000_adapter *adapter = hw->back;
3553
3554         pci_clear_mwi(adapter->pdev);
3555 }
3556
3557 void
3558 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
3559 {
3560         struct e1000_adapter *adapter = hw->back;
3561
3562         pci_read_config_word(adapter->pdev, reg, value);
3563 }
3564
3565 void
3566 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
3567 {
3568         struct e1000_adapter *adapter = hw->back;
3569
3570         pci_write_config_word(adapter->pdev, reg, *value);
3571 }
3572
3573 uint32_t
3574 e1000_io_read(struct e1000_hw *hw, unsigned long port)
3575 {
3576         return inl(port);
3577 }
3578
3579 void
3580 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
3581 {
3582         outl(value, port);
3583 }
3584
3585 static void
3586 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
3587 {
3588         struct e1000_adapter *adapter = netdev_priv(netdev);
3589         uint32_t ctrl, rctl;
3590
3591         e1000_irq_disable(adapter);
3592         adapter->vlgrp = grp;
3593
3594         if(grp) {
3595                 /* enable VLAN tag insert/strip */
3596                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3597                 ctrl |= E1000_CTRL_VME;
3598                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3599
3600                 /* enable VLAN receive filtering */
3601                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
3602                 rctl |= E1000_RCTL_VFE;
3603                 rctl &= ~E1000_RCTL_CFIEN;
3604                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3605                 e1000_update_mng_vlan(adapter);
3606         } else {
3607                 /* disable VLAN tag insert/strip */
3608                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3609                 ctrl &= ~E1000_CTRL_VME;
3610                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3611
3612                 /* disable VLAN filtering */
3613                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
3614                 rctl &= ~E1000_RCTL_VFE;
3615                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3616                 if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
3617                         e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3618                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3619                 }
3620         }
3621
3622         e1000_irq_enable(adapter);
3623 }
3624
3625 static void
3626 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
3627 {
3628         struct e1000_adapter *adapter = netdev_priv(netdev);
3629         uint32_t vfta, index;
3630         if((adapter->hw.mng_cookie.status &
3631                 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
3632                 (vid == adapter->mng_vlan_id))
3633                 return;
3634         /* add VID to filter table */
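             /* The VFTA is an array of 128 32-bit registers: VID bits 11:5
              * pick the register, bits 4:0 the bit within it.
              * e.g. vid 100 (0x064) -> index 3, bit 4. */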
3635         index = (vid >> 5) & 0x7F;
3636         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
3637         vfta |= (1 << (vid & 0x1F));
3638         e1000_write_vfta(&adapter->hw, index, vfta);
3639 }
3640
3641 static void
3642 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
3643 {
3644         struct e1000_adapter *adapter = netdev_priv(netdev);
3645         uint32_t vfta, index;
3646
3647         e1000_irq_disable(adapter);
3648
3649         if(adapter->vlgrp)
3650                 adapter->vlgrp->vlan_devices[vid] = NULL;
3651
3652         e1000_irq_enable(adapter);
3653
3654         if((adapter->hw.mng_cookie.status &
3655                 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
3656                 (vid == adapter->mng_vlan_id))
3657                 return;
3658         /* remove VID from filter table */
3659         index = (vid >> 5) & 0x7F;
3660         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
3661         vfta &= ~(1 << (vid & 0x1F));
3662         e1000_write_vfta(&adapter->hw, index, vfta);
3663 }
3664
3665 static void
3666 e1000_restore_vlan(struct e1000_adapter *adapter)
3667 {
3668         e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
3669
3670         if(adapter->vlgrp) {
3671                 uint16_t vid;
3672                 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
3673                         if(!adapter->vlgrp->vlan_devices[vid])
3674                                 continue;
3675                         e1000_vlan_rx_add_vid(adapter->netdev, vid);
3676                 }
3677         }
3678 }
3679
3680 int
3681 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
3682 {
3683         adapter->hw.autoneg = 0;
3684
3685         /* Fiber NICs only allow 1000 Mbps full duplex */
3686         if((adapter->hw.media_type == e1000_media_type_fiber) &&
3687                 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
3688                 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
3689                 return -EINVAL;
3690         }
3691
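             /* spddplx encodes the request as speed + duplex
              * (DUPLEX_HALF == 0, DUPLEX_FULL == 1),
              * e.g. SPEED_100 + DUPLEX_FULL == 101 */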
3692         switch(spddplx) {
3693         case SPEED_10 + DUPLEX_HALF:
3694                 adapter->hw.forced_speed_duplex = e1000_10_half;
3695                 break;
3696         case SPEED_10 + DUPLEX_FULL:
3697                 adapter->hw.forced_speed_duplex = e1000_10_full;
3698                 break;
3699         case SPEED_100 + DUPLEX_HALF:
3700                 adapter->hw.forced_speed_duplex = e1000_100_half;
3701                 break;
3702         case SPEED_100 + DUPLEX_FULL:
3703                 adapter->hw.forced_speed_duplex = e1000_100_full;
3704                 break;
3705         case SPEED_1000 + DUPLEX_FULL:
3706                 adapter->hw.autoneg = 1;
3707                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
3708                 break;
3709         case SPEED_1000 + DUPLEX_HALF: /* not supported */
3710         default:
3711                 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
3712                 return -EINVAL;
3713         }
3714         return 0;
3715 }
3716
3717 static int
3718 e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3719 {
3720         struct net_device *netdev = pci_get_drvdata(pdev);
3721         struct e1000_adapter *adapter = netdev_priv(netdev);
3722         uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
3723         uint32_t wufc = adapter->wol;
3724
3725         netif_device_detach(netdev);
3726
3727         if(netif_running(netdev))
3728                 e1000_down(adapter);
3729
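             /* If the link is already up there is no point waking on a link
              * status change, so drop E1000_WUFC_LNKC from the filter. */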
3730         status = E1000_READ_REG(&adapter->hw, STATUS);
3731         if(status & E1000_STATUS_LU)
3732                 wufc &= ~E1000_WUFC_LNKC;
3733
3734         if(wufc) {
3735                 e1000_setup_rctl(adapter);
3736                 e1000_set_multi(netdev);
3737
3738                 /* turn on all-multi mode if wake on multicast is enabled */
3739                 if(adapter->wol & E1000_WUFC_MC) {
3740                         rctl = E1000_READ_REG(&adapter->hw, RCTL);
3741                         rctl |= E1000_RCTL_MPE;
3742                         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3743                 }
3744
3745                 if(adapter->hw.mac_type >= e1000_82540) {
3746                         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3747                         /* advertise wake from D3Cold */
3748                         #define E1000_CTRL_ADVD3WUC 0x00100000
3749                         /* phy power management enable */
3750                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
3751                         ctrl |= E1000_CTRL_ADVD3WUC |
3752                                 E1000_CTRL_EN_PHY_PWR_MGMT;
3753                         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3754                 }
3755
3756                 if(adapter->hw.media_type == e1000_media_type_fiber ||
3757                    adapter->hw.media_type == e1000_media_type_internal_serdes) {
3758                         /* keep the laser running in D3 */
3759                         ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
3760                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
3761                         E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
3762                 }
3763
3764                 /* Allow time for pending master requests to run */
3765                 e1000_disable_pciex_master(&adapter->hw);
3766
3767                 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
3768                 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
3769                 pci_enable_wake(pdev, PCI_D3hot, 1);
3770                 pci_enable_wake(pdev, PCI_D3cold, 1);
3771         } else {
3772                 E1000_WRITE_REG(&adapter->hw, WUC, 0);
3773                 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
3774                 pci_enable_wake(pdev, PCI_D3hot, 0);
3775                 pci_enable_wake(pdev, PCI_D3cold, 0);
3776         }
3777
3778         pci_save_state(pdev);
3779
3780         if(adapter->hw.mac_type >= e1000_82540 &&
3781            adapter->hw.media_type == e1000_media_type_copper) {
3782                 manc = E1000_READ_REG(&adapter->hw, MANC);
3783                 if(manc & E1000_MANC_SMBUS_EN) {
3784                         manc |= E1000_MANC_ARP_EN;
3785                         E1000_WRITE_REG(&adapter->hw, MANC, manc);
3786                         pci_enable_wake(pdev, PCI_D3hot, 1);
3787                         pci_enable_wake(pdev, PCI_D3cold, 1);
3788                 }
3789         }
3790
3791         switch(adapter->hw.mac_type) {
3792         case e1000_82571:
3793         case e1000_82572:
3794                 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
3795                 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
3796                                 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3797                 break;
3798         case e1000_82573:
3799                 swsm = E1000_READ_REG(&adapter->hw, SWSM);
3800                 E1000_WRITE_REG(&adapter->hw, SWSM,
3801                                 swsm & ~E1000_SWSM_DRV_LOAD);
3802                 break;
3803         default:
3804                 break;
3805         }
3806
3807         pci_disable_device(pdev);
3808         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3809
3810         return 0;
3811 }
3812
3813 #ifdef CONFIG_PM
3814 static int
3815 e1000_resume(struct pci_dev *pdev)
3816 {
3817         struct net_device *netdev = pci_get_drvdata(pdev);
3818         struct e1000_adapter *adapter = netdev_priv(netdev);
3819         uint32_t manc, ret_val, swsm;
3820         uint32_t ctrl_ext;
3821
3822         pci_set_power_state(pdev, PCI_D0);
3823         pci_restore_state(pdev);
3824         ret_val = pci_enable_device(pdev);
3825         pci_set_master(pdev);
3826
3827         pci_enable_wake(pdev, PCI_D3hot, 0);
3828         pci_enable_wake(pdev, PCI_D3cold, 0);
3829
3830         e1000_reset(adapter);
3831         E1000_WRITE_REG(&adapter->hw, WUS, ~0);
3832
3833         if(netif_running(netdev))
3834                 e1000_up(adapter);
3835
3836         netif_device_attach(netdev);
3837
3838         if(adapter->hw.mac_type >= e1000_82540 &&
3839            adapter->hw.media_type == e1000_media_type_copper) {
3840                 manc = E1000_READ_REG(&adapter->hw, MANC);
3841                 manc &= ~(E1000_MANC_ARP_EN);
3842                 E1000_WRITE_REG(&adapter->hw, MANC, manc);
3843         }
3844
3845         switch(adapter->hw.mac_type) {
3846         case e1000_82571:
3847         case e1000_82572:
3848                 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
3849                 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
3850                                 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3851                 break;
3852         case e1000_82573:
3853                 swsm = E1000_READ_REG(&adapter->hw, SWSM);
3854                 E1000_WRITE_REG(&adapter->hw, SWSM,
3855                                 swsm | E1000_SWSM_DRV_LOAD);
3856                 break;
3857         default:
3858                 break;
3859         }
3860
3861         return 0;
3862 }
3863 #endif
3864 #ifdef CONFIG_NET_POLL_CONTROLLER
3865 /*
3866  * Polling 'interrupt' - used by things like netconsole to send skbs
3867  * without having to re-enable interrupts. It's not called while
3868  * the interrupt routine is executing.
3869  */
3870 static void
3871 e1000_netpoll(struct net_device *netdev)
3872 {
3873         struct e1000_adapter *adapter = netdev_priv(netdev);
3874         disable_irq(adapter->pdev->irq);
3875         e1000_intr(adapter->pdev->irq, netdev, NULL);
3876         e1000_clean_tx_irq(adapter);
3877         enable_irq(adapter->pdev->irq);
3878 }
3879 #endif
3880
3881 /* e1000_main.c */