/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.2.45-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
                                  struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
static int igb_clean_rx_ring_msix(struct napi_struct *, int);
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);

static int igb_suspend(struct pci_dev *, pm_message_t);
#ifdef CONFIG_PM
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call  = igb_notify_dca,
        .next           = NULL,
        .priority       = 0
};
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

        global_quad_port_a = 0;

        ret = pci_register_driver(&igb_driver);
#ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
#endif
        return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) ((((i) & 0x1) << 3) + ((i) >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
        int i;

        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* The queues are allocated for virtualization such that VF 0
                 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
                 * In order to avoid collision we start at the first free queue
                 * and continue consuming queues in the same sequence
                 */
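                /* e.g. Q_IDX_82576 maps ring indices 0,1,2,3 to register
                 * offsets 0,8,1,9, interleaving the low and high halves
                 * of the queue space. */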
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
                break;
        case e1000_82575:
        default:
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i].reg_idx = i;
                break;
        }
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        int i;

        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
        }

        adapter->rx_ring->buddy = adapter->tx_ring;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                ring->count = adapter->tx_ring_count;
                ring->adapter = adapter;
                ring->queue_index = i;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->count = adapter->rx_ring_count;
                ring->adapter = adapter;
                ring->queue_index = i;
                ring->itr_register = E1000_ITR;

                /* set a default napi handler for each rx_ring */
                netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
        }

        igb_cache_ring_register(adapter);
        return 0;
}

static void igb_free_queues(struct igb_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                netif_napi_del(&adapter->rx_ring[i].napi);

        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
}

#define IGB_N0_QUEUE (-1)
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
                              int tx_queue, int msix_vector)
{
        u32 msixbm = 0;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;

        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
                if (rx_queue > IGB_N0_QUEUE) {
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                        adapter->rx_ring[rx_queue].eims_value = msixbm;
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                        adapter->tx_ring[tx_queue].eims_value =
                                  E1000_EICR_TX_QUEUE0 << tx_queue;
                }
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
                   Each queue has a single entry in the table to which we write
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
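                /* Illustration of the layout, as implied by the masks and
                 * shifts below -- each 32-bit IVAR entry covers a pair of
                 * queues:
                 *   byte 0: RX queue 2n    byte 1: TX queue 2n
                 *   byte 2: RX queue 2n+1  byte 3: TX queue 2n+1
                 * with each byte holding the vector number OR'd with
                 * E1000_IVAR_VALID. */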
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue & 0x1) {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        } else {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        }
                        adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue & 0x1) {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        } else {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        }
                        adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                break;
        default:
                BUG();
                break;
        }
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;
        if (hw->mac.type == e1000_82576)
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                                   E1000_GPIE_PBA | E1000_GPIE_EIAME |
                                   E1000_GPIE_NSICR);

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *tx_ring = &adapter->tx_ring[i];
                igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
                adapter->eims_enable_mask |= tx_ring->eims_value;
                if (tx_ring->itr_val)
                        writel(tx_ring->itr_val,
                               hw->hw_addr + tx_ring->itr_register);
                else
                        writel(1, hw->hw_addr + tx_ring->itr_register);
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *rx_ring = &adapter->rx_ring[i];
                rx_ring->buddy = NULL;
                igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
                adapter->eims_enable_mask |= rx_ring->eims_value;
                if (rx_ring->itr_val)
                        writel(rx_ring->itr_val,
                               hw->hw_addr + rx_ring->itr_register);
                else
                        writel(1, hw->hw_addr + rx_ring->itr_register);
        }

        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
                array_wr32(E1000_MSIXBM(0), vector++,
                                      E1000_EIMS_OTHER);

                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support */
                tmp |= E1000_CTRL_EXT_PBA_CLR;

                /* Auto-Mask interrupts upon ICR read. */
                tmp |= E1000_CTRL_EXT_EIAME;
                tmp |= E1000_CTRL_EXT_IRCA;

                wr32(E1000_CTRL_EXT, tmp);
                adapter->eims_enable_mask |= E1000_EIMS_OTHER;
                adapter->eims_other = E1000_EIMS_OTHER;

                break;

        case e1000_82576:
                tmp = (vector++ | E1000_IVAR_VALID) << 8;
                wr32(E1000_IVAR_MISC, tmp);

                adapter->eims_enable_mask = (1 << (vector)) - 1;
                adapter->eims_other = 1 << (vector - 1);
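                /* Illustrative: with four queue vectors plus this misc
                 * vector, vector == 5 here, so the enable mask becomes
                 * 0x1f and bit 4 marks the "other causes" vector. */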
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */
        wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i, err = 0, vector = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                sprintf(ring->name, "%s-tx-%d", netdev->name, i);
                err = request_irq(adapter->msix_entries[vector].vector,
                                  &igb_msix_tx, 0, ring->name,
                                  &(adapter->tx_ring[i]));
                if (err)
                        goto out;
                ring->itr_register = E1000_EITR(0) + (vector << 2);
                ring->itr_val = 976; /* ~4000 ints/sec */
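                /* assuming the usual EITR interval unit of 256 ns,
                 * 976 * 256 ns ~= 250 us between interrupts, which is
                 * the ~4000 ints/sec noted above */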
                vector++;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                if (strlen(netdev->name) < (IFNAMSIZ - 5))
                        sprintf(ring->name, "%s-rx-%d", netdev->name, i);
                else
                        memcpy(ring->name, netdev->name, IFNAMSIZ);
                err = request_irq(adapter->msix_entries[vector].vector,
                                  &igb_msix_rx, 0, ring->name,
                                  &(adapter->rx_ring[i]));
                if (err)
                        goto out;
                ring->itr_register = E1000_EITR(0) + (vector << 2);
                ring->itr_val = adapter->itr;
                /* overwrite the poll routine for MSIX, we've already done
                 * netif_napi_add */
                ring->napi.poll = &igb_clean_rx_ring_msix;
                vector++;
        }

        err = request_irq(adapter->msix_entries[vector].vector,
                          &igb_msix_other, 0, netdev->name, netdev);
        if (err)
                goto out;

        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IGB_FLAG_HAS_MSI)
                pci_disable_msi(adapter->pdev);
        return;
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

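        /* one vector per Tx queue, one per Rx queue, plus one more for
         * "other" causes such as link status changes */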
        numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev,
                              adapter->msix_entries,
                              numvecs);
        if (err == 0)
                goto out;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
out:
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
        return;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err)
                        goto request_done;
                /* fall back to MSI */
                igb_reset_interrupt_capability(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_rx_queues = 1;
                igb_alloc_queues(adapter);
        } else {
                switch (hw->mac.type) {
                case e1000_82575:
                        wr32(E1000_MSIXBM(0),
                             (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
                        break;
                case e1000_82576:
                        wr32(E1000_IVAR0, E1000_IVAR_VALID);
                        break;
                default:
                        break;
                }
        }

        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
                                  netdev->name, netdev);
                if (!err)
                        goto request_done;
                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }

        err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
                          netdev->name, netdev);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->msix_entries) {
                int vector = 0, i;

                for (i = 0; i < adapter->num_tx_queues; i++)
                        free_irq(adapter->msix_entries[vector++].vector,
                                &(adapter->tx_ring[i]));
                for (i = 0; i < adapter->num_rx_queues; i++)
                        free_irq(adapter->msix_entries[vector++].vector,
                                &(adapter->rx_ring[i]));

                free_irq(adapter->msix_entries[vector++].vector, netdev);
                return;
        }

        free_irq(adapter->pdev->irq, netdev);
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                wr32(E1000_EIAM, 0);
                wr32(E1000_EIMC, ~0);
                wr32(E1000_EIAC, 0);
        }

        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
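                /* EIAC selects auto-clear and EIAM auto-mask for the queue
                 * vectors unmasked via EIMS; link status change (LSC) stays
                 * on the legacy IMS register. */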
                wr32(E1000_EIAC, adapter->eims_enable_mask);
                wr32(E1000_EIAM, adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                wr32(E1000_IMS, E1000_IMS_LSC);
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK);
                wr32(E1000_IAM, IMS_ENABLE_MASK);
        }
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;
        if (adapter->vlgrp) {
                if (!vlan_group_get_device(adapter->vlgrp, vid)) {
                        if (adapter->hw.mng_cookie.status &
                                E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                                igb_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

                        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
                                        (vid != old_vid) &&
                            !vlan_group_get_device(adapter->vlgrp, old_vid))
                                igb_vlan_rx_kill_vid(netdev, old_vid);
                } else
                        adapter->mng_vlan_id = vid;
        }
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
                        ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}


/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
                        ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_multi(netdev);

        igb_restore_vlan(adapter);

        igb_configure_tx(adapter);
        igb_setup_rctl(adapter);
        igb_configure_rx(adapter);

        igb_rx_fifo_flush_82575(&adapter->hw);

        /* call IGB_DESC_UNUSED which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
        }

        adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_rx_queues; i++)
                napi_enable(&adapter->rx_ring[i].napi);
        if (adapter->msix_entries)
                igb_configure_msix(adapter);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* Fire a link change interrupt to start the watchdog. */
        wr32(E1000_ICS, E1000_ICS_LSC);
        return 0;
}

void igb_down(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_stop_all_queues(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        for (i = 0; i < adapter->num_rx_queues; i++)
                napi_disable(&adapter->rx_ring[i].napi);

        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);
        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        if (!pci_channel_offline(adapter->pdev))
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_fc_info *fc = &hw->fc;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 hwm;

        /* Repartition the PBA for MTUs greater than 9K; CTRL.RST is
         * required for the new partitioning to take effect. */
        if (mac->type != e1000_82576)
                pba = E1000_PBA_34K;
        else
                pba = E1000_PBA_64K;

        if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
            (mac->type < e1000_82576)) {
                /* adjust PBA for jumbo frames */
                wr32(E1000_PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = rd32(E1000_PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* the tx fifo also stores 16 bytes of information about the tx
                 * but don't include ethernet FCS because hardware appends it */
                min_tx_space = (adapter->max_frame_size +
                                sizeof(struct e1000_tx_desc) -
                                ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                /* software strips receive CRC, so leave room for it */
                min_rx_space = adapter->max_frame_size;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;
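                /* illustrative: 1518-byte frames (1500 MTU) and 16-byte
                 * descriptors give min_tx_space = ALIGN(3060, 1024) >> 10
                 * = 4 KB and min_rx_space = ALIGN(1518, 1024) >> 10 = 2 KB */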

                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* if short on rx space, rx wins and must trump tx
                         * adjustment */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
                wr32(E1000_PBA, pba);
        }

        /* flow control settings */
        /* The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, or
         * - the full Rx FIFO size minus one full frame */
        hwm = min(((pba << 10) * 9 / 10),
                        ((pba << 10) - 2 * adapter->max_frame_size));
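        /* pba is in KB, so (pba << 10) converts it to bytes; e.g. a
         * hypothetical 34 KB Rx allocation with 1518-byte frames yields
         * hwm = min(31334, 31780) = 31334 */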

        if (mac->type < e1000_82576) {
                fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
                fc->low_water = fc->high_water - 8;
        } else {
                fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
                fc->low_water = fc->high_water - 16;
        }
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
        fc->type = fc->original_type;

        /* Allow time for pending master requests to run */
        adapter->hw.mac.ops.reset_hw(&adapter->hw);
        wr32(E1000_WUC, 0);

        if (adapter->hw.mac.ops.init_hw(&adapter->hw))
                dev_err(&adapter->pdev->dev, "Hardware Error\n");

        igb_update_mng_vlan(adapter);

        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

        igb_reset_adaptive(&adapter->hw);
        igb_get_phy_info(&adapter->hw);
}

/**
 * igb_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Returns true if an adapter needs ioport resources
 **/
static int igb_is_need_ioport(struct pci_dev *pdev)
{
        switch (pdev->device) {
        /* Currently there are no adapters that need ioport resources */
        default:
                return false;
        }
}

static const struct net_device_ops igb_netdev_ops = {
        .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
        .ndo_start_xmit         = igb_xmit_frame_adv,
        .ndo_get_stats          = igb_get_stats,
        .ndo_set_multicast_list = igb_set_multi,
        .ndo_set_mac_address    = igb_set_mac,
        .ndo_change_mtu         = igb_change_mtu,
        .ndo_do_ioctl           = igb_ioctl,
        .ndo_tx_timeout         = igb_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = igb_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = igb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct igb_adapter *adapter;
        struct e1000_hw *hw;
        struct pci_dev *us_dev;
        const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
        unsigned long mmio_start, mmio_len;
        int i, err, pci_using_dac, pos;
        u16 eeprom_data = 0, state = 0;
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;
        int bars, need_ioport;

        /* do not allocate ioport bars when not needed */
        need_ioport = igb_is_need_ioport(pdev);
        if (need_ioport) {
                bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
                err = pci_enable_device(pdev);
        } else {
                bars = pci_select_bars(pdev, IORESOURCE_MEM);
                err = pci_enable_device_mem(pdev);
        }
        if (err)
                return err;

        pci_using_dac = 0;
        err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (!err)
                        pci_using_dac = 1;
        } else {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
                                goto err_dma;
                        }
                }
        }

        /* 82575 requires that the pci-e link partner disable the L0s state */
        switch (pdev->device) {
        case E1000_DEV_ID_82575EB_COPPER:
        case E1000_DEV_ID_82575EB_FIBER_SERDES:
        case E1000_DEV_ID_82575GB_QUAD_COPPER:
                us_dev = pdev->bus->self;
                pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
                if (pos) {
                        pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL,
                                             &state);
                        state &= ~PCIE_LINK_STATE_L0S;
                        pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL,
                                              state);
                        dev_info(&pdev->dev,
                                 "Disabling ASPM L0s upstream switch port %s\n",
                                 pci_name(us_dev));
                }
        default:
                break;
        }

        err = pci_request_selected_regions(pdev, bars, igb_driver_name);
        if (err)
                goto err_pci_reg;

        err = pci_enable_pcie_error_reporting(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
                        "0x%x\n", err);
                /* non-fatal, continue */
        }

        pci_set_master(pdev);
        pci_save_state(pdev);

        err = -ENOMEM;
        netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
        adapter->bars = bars;
        adapter->need_ioport = need_ioport;

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);

        err = -EIO;
        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
        if (!adapter->hw.hw_addr)
                goto err_ioremap;

        netdev->netdev_ops = &igb_netdev_ops;
        igb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len;

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;

        /* setup the private structure */
        hw->back = adapter;
        /* Copy the default MAC, PHY and NVM function pointers */
        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
        memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
        /* Initialize skew-specific constants */
        err = ei->get_invariants(hw);
        if (err)
                goto err_hw_init;

        err = igb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        igb_get_bus_info_pcie(hw);

        /* set flags */
        switch (hw->mac.type) {
        case e1000_82576:
        case e1000_82575:
                adapter->flags |= IGB_FLAG_HAS_DCA;
                adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
                break;
        default:
                break;
        }

        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = AUTO_ALL_MODES;
                hw->phy.disable_polarity_correction = false;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        if (igb_check_reset_block(hw))
                dev_info(&pdev->dev,
                        "PHY reset is blocked due to SOL/IDER session.\n");

        netdev->features = NETIF_F_SG |
                           NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;

        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;

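        /* GRO supersedes the old in-driver LRO path here (see the
         * "igb: Replace LRO with GRO" change); the config symbol below
         * still carries the LRO name. */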
#ifdef CONFIG_IGB_LRO
        netdev->features |= NETIF_F_GRO;
#endif

        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_HW_CSUM;
        netdev->vlan_features |= NETIF_F_SG;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        netdev->features |= NETIF_F_LLTX;
        adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

        /* before reading the NVM, reset the controller to put the device in a
         * known good starting state */
        hw->mac.ops.reset_hw(hw);

        /* make sure the NVM is good */
        if (igb_validate_nvm_checksum(hw) < 0) {
                dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        /* copy the MAC address out of the NVM */
        if (hw->mac.ops.read_mac_addr(hw))
                dev_err(&pdev->dev, "NVM Read Error\n");

        memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                dev_err(&pdev->dev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &igb_watchdog;
        adapter->watchdog_timer.data = (unsigned long) adapter;

        init_timer(&adapter->phy_info_timer);
        adapter->phy_info_timer.function = &igb_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;

        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

        /* Initialize link & ring properties that are user-changeable */
        adapter->tx_ring->count = 256;
        for (i = 0; i < adapter->num_tx_queues; i++)
                adapter->tx_ring[i].count = adapter->tx_ring->count;
        adapter->rx_ring->count = 256;
        for (i = 0; i < adapter->num_rx_queues; i++)
                adapter->rx_ring[i].count = adapter->rx_ring->count;

        adapter->fc_autoneg = true;
        hw->mac.autoneg = true;
        hw->phy.autoneg_advertised = 0x2f;

        hw->fc.original_type = e1000_fc_default;
        hw->fc.type = e1000_fc_default;

        adapter->itr_setting = 3;
        adapter->itr = IGB_START_ITR;

        igb_validate_mdi_setting(hw);

        adapter->rx_csum = 1;

        /* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */
        if (hw->bus.func == 0 ||
            hw->device_id == E1000_DEV_ID_82575EB_COPPER)
                hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
                                     &eeprom_data);

        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;

        /* now that we have the eeprom settings, apply the special cases where
         * the eeprom may be wrong or the board simply won't support wake on
         * lan on a particular port */
        switch (pdev->device) {
        case E1000_DEV_ID_82575GB_QUAD_COPPER:
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82575EB_FIBER_SERDES:
        case E1000_DEV_ID_82576_FIBER:
        case E1000_DEV_ID_82576_SERDES:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        }

        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

        /* reset the hardware with the new settings */
        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        /* tell the stack to leave us alone until igb_open() is called */
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

#ifdef CONFIG_IGB_DCA
        if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
            (dca_add_requester(&pdev->dev) == 0)) {
                adapter->flags |= IGB_FLAG_DCA_ENABLED;
                dev_info(&pdev->dev, "DCA enabled\n");
                /* Always use CB2 mode, difference is masked
                 * in the CB driver. */
                wr32(E1000_DCA_CTRL, 2);
                igb_setup_dca(adapter);
        }
#endif

        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
                 netdev->name,
                 ((hw->bus.speed == e1000_bus_speed_2500)
                  ? "2.5Gb/s" : "unknown"),
                 ((hw->bus.width == e1000_bus_width_pcie_x4)
                  ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
                  ? "Width x1" : "unknown"),
                 netdev->dev_addr);

        igb_read_part_num(hw, &part_num);
        dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
                (part_num >> 8), (part_num & 0xff));

        dev_info(&pdev->dev,
                "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
                adapter->msix_entries ? "MSI-X" :
                (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
                adapter->num_rx_queues, adapter->num_tx_queues);

        return 0;

err_register:
        igb_release_hw_control(adapter);
err_eeprom:
        if (!igb_check_reset_block(hw))
                igb_reset_phy(hw);

        if (hw->flash_address)
                iounmap(hw->flash_address);

        igb_remove_device(hw);
        igb_free_queues(adapter);
err_sw_init:
err_hw_init:
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_selected_regions(pdev, bars);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_IGB_DCA
        struct e1000_hw *hw = &adapter->hw;
#endif
        int err;

        /* flush_scheduled_work() may reschedule our watchdog task, so
         * explicitly disable the watchdog tasks from being rescheduled */
        set_bit(__IGB_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
        if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                dev_info(&pdev->dev, "DCA disabled\n");
                dca_remove_requester(&pdev->dev);
                adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
                wr32(E1000_DCA_CTRL, 1);
        }
#endif

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        unregister_netdev(netdev);

        if (!igb_check_reset_block(&adapter->hw))
                igb_reset_phy(&adapter->hw);

        igb_remove_device(&adapter->hw);
        igb_reset_interrupt_capability(adapter);

        igb_free_queues(adapter);

        iounmap(adapter->hw.hw_addr);
        if (adapter->hw.flash_address)
                iounmap(adapter->hw.flash_address);
        pci_release_selected_regions(pdev, adapter->bars);

        free_netdev(netdev);

        err = pci_disable_pcie_error_reporting(pdev);
        if (err)
                dev_err(&pdev->dev,
                        "pci_disable_pcie_error_reporting failed 0x%x\n", err);

        pci_disable_device(pdev);
}
1431
1432 /**
1433  * igb_sw_init - Initialize general software structures (struct igb_adapter)
1434  * @adapter: board private structure to initialize
1435  *
1436  * igb_sw_init initializes the Adapter private data structure.
1437  * Fields are initialized based on PCI device information and
1438  * OS network device settings (MTU size).
1439  **/
1440 static int __devinit igb_sw_init(struct igb_adapter *adapter)
1441 {
1442         struct e1000_hw *hw = &adapter->hw;
1443         struct net_device *netdev = adapter->netdev;
1444         struct pci_dev *pdev = adapter->pdev;
1445
1446         pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1447
1448         adapter->tx_ring_count = IGB_DEFAULT_TXD;
1449         adapter->rx_ring_count = IGB_DEFAULT_RXD;
1450         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1451         adapter->rx_ps_hdr_size = 0; /* disable packet split */
1452         adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1453         adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
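        /* With the default 1500-byte MTU, max_frame_size works out to
         * 1500 + 14 (Ethernet header) + 4 (FCS) = 1518 bytes, and
         * min_frame_size to ETH_ZLEN (60) + 4 = 64, the smallest legal
         * frame on the wire. */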
1454
1455         /* Number of supported queues. */
1456         /* Having more queues than CPUs doesn't make sense. */
1457         adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1458         adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
1459
1460         /* This call may decrease the number of queues depending on
1461          * interrupt mode. */
1462         igb_set_interrupt_capability(adapter);
1463
1464         if (igb_alloc_queues(adapter)) {
1465                 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1466                 return -ENOMEM;
1467         }
1468
1469         /* Explicitly disable IRQ since the NIC can be in any state. */
1470         igb_irq_disable(adapter);
1471
1472         set_bit(__IGB_DOWN, &adapter->state);
1473         return 0;
1474 }
1475
1476 /**
1477  * igb_open - Called when a network interface is made active
1478  * @netdev: network interface device structure
1479  *
1480  * Returns 0 on success, negative value on failure
1481  *
1482  * The open entry point is called when a network interface is made
1483  * active by the system (IFF_UP).  At this point all resources needed
1484  * for transmit and receive operations are allocated, the interrupt
1485  * handler is registered with the OS, the watchdog timer is started,
1486  * and the stack is notified that the interface is ready.
1487  **/
1488 static int igb_open(struct net_device *netdev)
1489 {
1490         struct igb_adapter *adapter = netdev_priv(netdev);
1491         struct e1000_hw *hw = &adapter->hw;
1492         int err;
1493         int i;
1494
1495         /* disallow open during test */
1496         if (test_bit(__IGB_TESTING, &adapter->state))
1497                 return -EBUSY;
1498
1499         /* allocate transmit descriptors */
1500         err = igb_setup_all_tx_resources(adapter);
1501         if (err)
1502                 goto err_setup_tx;
1503
1504         /* allocate receive descriptors */
1505         err = igb_setup_all_rx_resources(adapter);
1506         if (err)
1507                 goto err_setup_rx;
1508
1509         /* e1000_power_up_phy(adapter); */
1510
1511         adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1512         if ((adapter->hw.mng_cookie.status &
1513              E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1514                 igb_update_mng_vlan(adapter);
1515
1516         /* before we allocate an interrupt, we must be ready to handle it.
1517          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1518          * as soon as we call request_irq, so we have to set up our
1519          * clean_rx handler before we do so.  */
1520         igb_configure(adapter);
1521
1522         err = igb_request_irq(adapter);
1523         if (err)
1524                 goto err_req_irq;
1525
1526         /* From here on the code is the same as igb_up() */
1527         clear_bit(__IGB_DOWN, &adapter->state);
1528
1529         for (i = 0; i < adapter->num_rx_queues; i++)
1530                 napi_enable(&adapter->rx_ring[i].napi);
1531
1532         /* Clear any pending interrupts. */
1533         rd32(E1000_ICR);
1534
1535         igb_irq_enable(adapter);
1536
1537         netif_tx_start_all_queues(netdev);
1538
1539         /* Fire a link status change interrupt to start the watchdog. */
1540         wr32(E1000_ICS, E1000_ICS_LSC);
1541
1542         return 0;
1543
1544 err_req_irq:
1545         igb_release_hw_control(adapter);
1546         /* e1000_power_down_phy(adapter); */
1547         igb_free_all_rx_resources(adapter);
1548 err_setup_rx:
1549         igb_free_all_tx_resources(adapter);
1550 err_setup_tx:
1551         igb_reset(adapter);
1552
1553         return err;
1554 }
1555
1556 /**
1557  * igb_close - Disables a network interface
1558  * @netdev: network interface device structure
1559  *
1560  * Returns 0, this is not allowed to fail
1561  *
1562  * The close entry point is called when an interface is de-activated
1563  * by the OS.  The hardware is still under the driver's control, but
1564  * needs to be disabled.  A global MAC reset is issued to stop the
1565  * hardware, and all transmit and receive resources are freed.
1566  **/
1567 static int igb_close(struct net_device *netdev)
1568 {
1569         struct igb_adapter *adapter = netdev_priv(netdev);
1570
1571         WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
1572         igb_down(adapter);
1573
1574         igb_free_irq(adapter);
1575
1576         igb_free_all_tx_resources(adapter);
1577         igb_free_all_rx_resources(adapter);
1578
1579         /* kill manageability vlan ID if supported, but not if a vlan with
1580          * the same ID is registered on the host OS (let 8021q kill it) */
1581         if ((adapter->hw.mng_cookie.status &
1582                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1583              !(adapter->vlgrp &&
1584                vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1585                 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1586
1587         return 0;
1588 }
1589
1590 /**
1591  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1592  * @adapter: board private structure
1593  * @tx_ring: tx descriptor ring (for a specific queue) to setup
1594  *
1595  * Return 0 on success, negative on failure
1596  **/
1597
1598 int igb_setup_tx_resources(struct igb_adapter *adapter,
1599                            struct igb_ring *tx_ring)
1600 {
1601         struct pci_dev *pdev = adapter->pdev;
1602         int size;
1603
1604         size = sizeof(struct igb_buffer) * tx_ring->count;
1605         tx_ring->buffer_info = vmalloc(size);
1606         if (!tx_ring->buffer_info)
1607                 goto err;
1608         memset(tx_ring->buffer_info, 0, size);
1609
1610         /* round up to nearest 4K */
1611         tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1612         tx_ring->size = ALIGN(tx_ring->size, 4096);
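        /* e.g. a ring of 256 descriptors at 16 bytes per legacy TX
         * descriptor is exactly 4096 bytes; other counts round up so the
         * DMA allocation always ends on a page boundary. */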
1613
1614         tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1615                                              &tx_ring->dma);
1616
1617         if (!tx_ring->desc)
1618                 goto err;
1619
1620         tx_ring->adapter = adapter;
1621         tx_ring->next_to_use = 0;
1622         tx_ring->next_to_clean = 0;
1623         return 0;
1624
1625 err:
1626         vfree(tx_ring->buffer_info);
1627         dev_err(&adapter->pdev->dev,
1628                 "Unable to allocate memory for the transmit descriptor ring\n");
1629         return -ENOMEM;
1630 }
1631
1632 /**
1633  * igb_setup_all_tx_resources - wrapper to allocate Tx resources
1634  *                                (Descriptors) for all queues
1635  * @adapter: board private structure
1636  *
1637  * Return 0 on success, negative on failure
1638  **/
1639 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1640 {
1641         int i, err = 0;
1642         int r_idx;
1643
1644         for (i = 0; i < adapter->num_tx_queues; i++) {
1645                 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1646                 if (err) {
1647                         dev_err(&adapter->pdev->dev,
1648                                 "Allocation for Tx Queue %u failed\n", i);
1649                         for (i--; i >= 0; i--)
1650                                 igb_free_tx_resources(&adapter->tx_ring[i]);
1651                         break;
1652                 }
1653         }
1654
1655         for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
1656                 r_idx = i % adapter->num_tx_queues;
1657                 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1658         }
1659         return err;
1660 }
1661
1662 /**
1663  * igb_configure_tx - Configure transmit Unit after Reset
1664  * @adapter: board private structure
1665  *
1666  * Configure the Tx unit of the MAC after a reset.
1667  **/
1668 static void igb_configure_tx(struct igb_adapter *adapter)
1669 {
1670         u64 tdba;
1671         struct e1000_hw *hw = &adapter->hw;
1672         u32 tctl;
1673         u32 txdctl, txctrl;
1674         int i, j;
1675
1676         for (i = 0; i < adapter->num_tx_queues; i++) {
1677                 struct igb_ring *ring = &(adapter->tx_ring[i]);
1678                 j = ring->reg_idx;
1679                 wr32(E1000_TDLEN(j),
1680                                 ring->count * sizeof(struct e1000_tx_desc));
1681                 tdba = ring->dma;
1682                 wr32(E1000_TDBAL(j),
1683                                 tdba & 0x00000000ffffffffULL);
1684                 wr32(E1000_TDBAH(j), tdba >> 32);
1685
1686                 ring->head = E1000_TDH(j);
1687                 ring->tail = E1000_TDT(j);
1688                 writel(0, hw->hw_addr + ring->tail);
1689                 writel(0, hw->hw_addr + ring->head);
1690                 txdctl = rd32(E1000_TXDCTL(j));
1691                 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1692                 wr32(E1000_TXDCTL(j), txdctl);
1693
1694                 /* Turn off Relaxed Ordering on head write-backs.  The
1695                  * writebacks MUST be delivered in order or it will
1696          * completely screw up our bookkeeping.
1697                  */
1698                 txctrl = rd32(E1000_DCA_TXCTRL(j));
1699                 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1700                 wr32(E1000_DCA_TXCTRL(j), txctrl);
1701         }
1702
1703
1704
1705         /* Use the default values for the Tx Inter Packet Gap (IPG) timer */
1706
1707         /* Program the Transmit Control Register */
1708
1709         tctl = rd32(E1000_TCTL);
1710         tctl &= ~E1000_TCTL_CT;
1711         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1712                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1713
1714         igb_config_collision_dist(hw);
1715
1716         /* Setup Transmit Descriptor Settings for eop descriptor */
1717         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1718
1719         /* Enable transmits */
1720         tctl |= E1000_TCTL_EN;
1721
1722         wr32(E1000_TCTL, tctl);
1723 }
1724
1725 /**
1726  * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1727  * @adapter: board private structure
1728  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
1729  *
1730  * Returns 0 on success, negative on failure
1731  **/
1732
1733 int igb_setup_rx_resources(struct igb_adapter *adapter,
1734                            struct igb_ring *rx_ring)
1735 {
1736         struct pci_dev *pdev = adapter->pdev;
1737         int size, desc_len;
1738
1739         size = sizeof(struct igb_buffer) * rx_ring->count;
1740         rx_ring->buffer_info = vmalloc(size);
1741         if (!rx_ring->buffer_info)
1742                 goto err;
1743         memset(rx_ring->buffer_info, 0, size);
1744
1745         desc_len = sizeof(union e1000_adv_rx_desc);
1746
1747         /* Round up to nearest 4K */
1748         rx_ring->size = rx_ring->count * desc_len;
1749         rx_ring->size = ALIGN(rx_ring->size, 4096);
1750
1751         rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1752                                              &rx_ring->dma);
1753
1754         if (!rx_ring->desc)
1755                 goto err;
1756
1757         rx_ring->next_to_clean = 0;
1758         rx_ring->next_to_use = 0;
1759
1760         rx_ring->adapter = adapter;
1761
1762         return 0;
1763
1764 err:
1765         vfree(rx_ring->buffer_info);
1766         dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1767                 "the receive descriptor ring\n");
1768         return -ENOMEM;
1769 }
1770
1771 /**
1772  * igb_setup_all_rx_resources - wrapper to allocate Rx resources
1773  *                                (Descriptors) for all queues
1774  * @adapter: board private structure
1775  *
1776  * Return 0 on success, negative on failure
1777  **/
1778 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
1779 {
1780         int i, err = 0;
1781
1782         for (i = 0; i < adapter->num_rx_queues; i++) {
1783                 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1784                 if (err) {
1785                         dev_err(&adapter->pdev->dev,
1786                                 "Allocation for Rx Queue %u failed\n", i);
1787                         for (i--; i >= 0; i--)
1788                                 igb_free_rx_resources(&adapter->rx_ring[i]);
1789                         break;
1790                 }
1791         }
1792
1793         return err;
1794 }
1795
1796 /**
1797  * igb_setup_rctl - configure the receive control registers
1798  * @adapter: Board private structure
1799  **/
1800 static void igb_setup_rctl(struct igb_adapter *adapter)
1801 {
1802         struct e1000_hw *hw = &adapter->hw;
1803         u32 rctl;
1804         u32 srrctl = 0;
1805         int i, j;
1806
1807         rctl = rd32(E1000_RCTL);
1808
1809         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1810         rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1811
1812         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1813                 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1814
1815         /*
1816          * enable stripping of CRC. It's unlikely this will break BMC
1817          * redirection as it did with e1000. Newer features require
1818          * that the HW strips the CRC.
1819          */
1820         rctl |= E1000_RCTL_SECRC;
1821
1822         /*
1823          * disable store bad packets, long packet enable, and clear size bits.
1824          */
1825         rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256);
1826
1827         if (adapter->netdev->mtu > ETH_DATA_LEN)
1828                 rctl |= E1000_RCTL_LPE;
1829
1830         /* Setup buffer sizes */
1831         switch (adapter->rx_buffer_len) {
1832         case IGB_RXBUFFER_256:
1833                 rctl |= E1000_RCTL_SZ_256;
1834                 break;
1835         case IGB_RXBUFFER_512:
1836                 rctl |= E1000_RCTL_SZ_512;
1837                 break;
1838         default:
1839                 srrctl = ALIGN(adapter->rx_buffer_len, 1024)
1840                          >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1841                 break;
1842         }
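        /* Worked example for the default case: the 1522-byte rx_buffer_len
         * set in igb_sw_init rounds up to 2048, and the shift expresses it
         * in the 1 KB units of the SRRCTL packet-buffer field, giving 2. */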
1843
1844         /* 82575 and greater support packet-split where the protocol
1845          * header is placed in skb->data and the packet data is
1846          * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1847          * In the case of a non-split, skb->data is linearly filled,
1848          * followed by the page buffers.  Therefore, skb->data is
1849          * sized to hold the largest protocol header.
1850          */
1851         /* allocations using alloc_page take too long for regular MTU
1852          * so only enable packet split for jumbo frames */
1853         if (rctl & E1000_RCTL_LPE) {
1854                 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
1855                 srrctl |= adapter->rx_ps_hdr_size <<
1856                          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1857                 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1858         } else {
1859                 adapter->rx_ps_hdr_size = 0;
1860                 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1861         }
1862
1863         for (i = 0; i < adapter->num_rx_queues; i++) {
1864                 j = adapter->rx_ring[i].reg_idx;
1865                 wr32(E1000_SRRCTL(j), srrctl);
1866         }
1867
1868         wr32(E1000_RCTL, rctl);
1869 }
1870
1871 /**
1872  * igb_configure_rx - Configure receive Unit after Reset
1873  * @adapter: board private structure
1874  *
1875  * Configure the Rx unit of the MAC after a reset.
1876  **/
1877 static void igb_configure_rx(struct igb_adapter *adapter)
1878 {
1879         u64 rdba;
1880         struct e1000_hw *hw = &adapter->hw;
1881         u32 rctl, rxcsum;
1882         u32 rxdctl;
1883         int i, j;
1884
1885         /* disable receives while setting up the descriptors */
1886         rctl = rd32(E1000_RCTL);
1887         wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1888         wrfl();
1889         mdelay(10);
1890
1891         if (adapter->itr_setting > 3)
1892                 wr32(E1000_ITR, adapter->itr);
1893
1894         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1895          * the Base and Length of the Rx Descriptor Ring */
1896         for (i = 0; i < adapter->num_rx_queues; i++) {
1897                 struct igb_ring *ring = &(adapter->rx_ring[i]);
1898                 j = ring->reg_idx;
1899                 rdba = ring->dma;
1900                 wr32(E1000_RDBAL(j),
1901                                 rdba & 0x00000000ffffffffULL);
1902                 wr32(E1000_RDBAH(j), rdba >> 32);
1903                 wr32(E1000_RDLEN(j),
1904                                ring->count * sizeof(union e1000_adv_rx_desc));
1905
1906                 ring->head = E1000_RDH(j);
1907                 ring->tail = E1000_RDT(j);
1908                 writel(0, hw->hw_addr + ring->tail);
1909                 writel(0, hw->hw_addr + ring->head);
1910
1911                 rxdctl = rd32(E1000_RXDCTL(j));
1912                 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1913                 rxdctl &= 0xFFF00000;
1914                 rxdctl |= IGB_RX_PTHRESH;
1915                 rxdctl |= IGB_RX_HTHRESH << 8;
1916                 rxdctl |= IGB_RX_WTHRESH << 16;
1917                 wr32(E1000_RXDCTL(j), rxdctl);
1918         }
1919
1920         if (adapter->num_rx_queues > 1) {
1921                 u32 random[10];
1922                 u32 mrqc;
1923                 u32 j, shift;
1924                 union e1000_reta {
1925                         u32 dword;
1926                         u8  bytes[4];
1927                 } reta;
1928
1929                 get_random_bytes(&random[0], 40);
1930
1931                 if (hw->mac.type >= e1000_82576)
1932                         shift = 0;
1933                 else
1934                         shift = 6;
1935                 for (j = 0; j < (32 * 4); j++) {
1936                         reta.bytes[j & 3] =
1937                                 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
1938                         if ((j & 3) == 3)
1939                                 writel(reta.dword,
1940                                        hw->hw_addr + E1000_RETA(0) + (j & ~3));
1941                 }
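                /* e.g. with two RX queues the 128 redirection entries
                 * cycle 0,1,0,1,... and a full dword is flushed to the
                 * RETA every fourth byte.  The shift of 6 on 82575 is
                 * presumably because that MAC reads the queue number from
                 * the high-order bits of each entry, while 82576 takes it
                 * from the low bits. */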
1942                 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
1943
1944                 /* Fill out hash function seeds */
1945                 for (j = 0; j < 10; j++)
1946                         array_wr32(E1000_RSSRK(0), j, random[j]);
1947
1948                 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
1949                          E1000_MRQC_RSS_FIELD_IPV4_TCP);
1950                 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
1951                          E1000_MRQC_RSS_FIELD_IPV6_TCP);
1952                 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
1953                          E1000_MRQC_RSS_FIELD_IPV6_UDP);
1954                 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
1955                          E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
1956
1957
1958                 wr32(E1000_MRQC, mrqc);
1959
1960                 /* Multiqueue and raw packet checksumming are mutually
1961          * exclusive.  Note that this is not the same as TCP/IP
1962                  * checksumming, which works fine. */
1963                 rxcsum = rd32(E1000_RXCSUM);
1964                 rxcsum |= E1000_RXCSUM_PCSD;
1965                 wr32(E1000_RXCSUM, rxcsum);
1966         } else {
1967                 /* Enable Receive Checksum Offload for TCP and UDP */
1968                 rxcsum = rd32(E1000_RXCSUM);
1969                 if (adapter->rx_csum) {
1970                         rxcsum |= E1000_RXCSUM_TUOFL;
1971
1972                         /* Enable IPv4 payload checksum for UDP fragments
1973                          * Must be used in conjunction with packet-split. */
1974                         if (adapter->rx_ps_hdr_size)
1975                                 rxcsum |= E1000_RXCSUM_IPPCSE;
1976                 } else {
1977                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1978                         /* don't need to clear IPPCSE as it defaults to 0 */
1979                 }
1980                 wr32(E1000_RXCSUM, rxcsum);
1981         }
1982
1983         if (adapter->vlgrp)
1984                 wr32(E1000_RLPML,
1985                                 adapter->max_frame_size + VLAN_TAG_SIZE);
1986         else
1987                 wr32(E1000_RLPML, adapter->max_frame_size);
1988
1989         /* Enable Receives */
1990         wr32(E1000_RCTL, rctl);
1991 }
1992
1993 /**
1994  * igb_free_tx_resources - Free Tx Resources per Queue
1995  * @tx_ring: Tx descriptor ring for a specific queue
1996  *
1997  * Free all transmit software resources
1998  **/
1999 void igb_free_tx_resources(struct igb_ring *tx_ring)
2000 {
2001         struct pci_dev *pdev = tx_ring->adapter->pdev;
2002
2003         igb_clean_tx_ring(tx_ring);
2004
2005         vfree(tx_ring->buffer_info);
2006         tx_ring->buffer_info = NULL;
2007
2008         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2009
2010         tx_ring->desc = NULL;
2011 }
2012
2013 /**
2014  * igb_free_all_tx_resources - Free Tx Resources for All Queues
2015  * @adapter: board private structure
2016  *
2017  * Free all transmit software resources
2018  **/
2019 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2020 {
2021         int i;
2022
2023         for (i = 0; i < adapter->num_tx_queues; i++)
2024                 igb_free_tx_resources(&adapter->tx_ring[i]);
2025 }
2026
2027 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2028                                            struct igb_buffer *buffer_info)
2029 {
2030         if (buffer_info->dma) {
2031                 pci_unmap_page(adapter->pdev,
2032                                 buffer_info->dma,
2033                                 buffer_info->length,
2034                                 PCI_DMA_TODEVICE);
2035                 buffer_info->dma = 0;
2036         }
2037         if (buffer_info->skb) {
2038                 dev_kfree_skb_any(buffer_info->skb);
2039                 buffer_info->skb = NULL;
2040         }
2041         buffer_info->time_stamp = 0;
2042         /* buffer_info must be completely set up in the transmit path */
2043 }
2044
2045 /**
2046  * igb_clean_tx_ring - Free Tx Buffers
2047  * @tx_ring: ring to be cleaned
2048  **/
2049 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2050 {
2051         struct igb_adapter *adapter = tx_ring->adapter;
2052         struct igb_buffer *buffer_info;
2053         unsigned long size;
2054         unsigned int i;
2055
2056         if (!tx_ring->buffer_info)
2057                 return;
2058         /* Free all the Tx ring sk_buffs */
2059
2060         for (i = 0; i < tx_ring->count; i++) {
2061                 buffer_info = &tx_ring->buffer_info[i];
2062                 igb_unmap_and_free_tx_resource(adapter, buffer_info);
2063         }
2064
2065         size = sizeof(struct igb_buffer) * tx_ring->count;
2066         memset(tx_ring->buffer_info, 0, size);
2067
2068         /* Zero out the descriptor ring */
2069
2070         memset(tx_ring->desc, 0, tx_ring->size);
2071
2072         tx_ring->next_to_use = 0;
2073         tx_ring->next_to_clean = 0;
2074
2075         writel(0, adapter->hw.hw_addr + tx_ring->head);
2076         writel(0, adapter->hw.hw_addr + tx_ring->tail);
2077 }
2078
2079 /**
2080  * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2081  * @adapter: board private structure
2082  **/
2083 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2084 {
2085         int i;
2086
2087         for (i = 0; i < adapter->num_tx_queues; i++)
2088                 igb_clean_tx_ring(&adapter->tx_ring[i]);
2089 }
2090
2091 /**
2092  * igb_free_rx_resources - Free Rx Resources
2093  * @rx_ring: ring to clean the resources from
2094  *
2095  * Free all receive software resources
2096  **/
2097 void igb_free_rx_resources(struct igb_ring *rx_ring)
2098 {
2099         struct pci_dev *pdev = rx_ring->adapter->pdev;
2100
2101         igb_clean_rx_ring(rx_ring);
2102
2103         vfree(rx_ring->buffer_info);
2104         rx_ring->buffer_info = NULL;
2105
2106         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2107
2108         rx_ring->desc = NULL;
2109 }
2110
2111 /**
2112  * igb_free_all_rx_resources - Free Rx Resources for All Queues
2113  * @adapter: board private structure
2114  *
2115  * Free all receive software resources
2116  **/
2117 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2118 {
2119         int i;
2120
2121         for (i = 0; i < adapter->num_rx_queues; i++)
2122                 igb_free_rx_resources(&adapter->rx_ring[i]);
2123 }
2124
2125 /**
2126  * igb_clean_rx_ring - Free Rx Buffers per Queue
2127  * @rx_ring: ring to free buffers from
2128  **/
2129 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2130 {
2131         struct igb_adapter *adapter = rx_ring->adapter;
2132         struct igb_buffer *buffer_info;
2133         struct pci_dev *pdev = adapter->pdev;
2134         unsigned long size;
2135         unsigned int i;
2136
2137         if (!rx_ring->buffer_info)
2138                 return;
2139         /* Free all the Rx ring sk_buffs */
2140         for (i = 0; i < rx_ring->count; i++) {
2141                 buffer_info = &rx_ring->buffer_info[i];
2142                 if (buffer_info->dma) {
2143                         if (adapter->rx_ps_hdr_size)
2144                                 pci_unmap_single(pdev, buffer_info->dma,
2145                                                  adapter->rx_ps_hdr_size,
2146                                                  PCI_DMA_FROMDEVICE);
2147                         else
2148                                 pci_unmap_single(pdev, buffer_info->dma,
2149                                                  adapter->rx_buffer_len,
2150                                                  PCI_DMA_FROMDEVICE);
2151                         buffer_info->dma = 0;
2152                 }
2153
2154                 if (buffer_info->skb) {
2155                         dev_kfree_skb(buffer_info->skb);
2156                         buffer_info->skb = NULL;
2157                 }
2158                 if (buffer_info->page) {
2159                         if (buffer_info->page_dma)
2160                                 pci_unmap_page(pdev, buffer_info->page_dma,
2161                                                PAGE_SIZE / 2,
2162                                                PCI_DMA_FROMDEVICE);
2163                         put_page(buffer_info->page);
2164                         buffer_info->page = NULL;
2165                         buffer_info->page_dma = 0;
2166                         buffer_info->page_offset = 0;
2167                 }
2168         }
2169
2170         size = sizeof(struct igb_buffer) * rx_ring->count;
2171         memset(rx_ring->buffer_info, 0, size);
2172
2173         /* Zero out the descriptor ring */
2174         memset(rx_ring->desc, 0, rx_ring->size);
2175
2176         rx_ring->next_to_clean = 0;
2177         rx_ring->next_to_use = 0;
2178
2179         writel(0, adapter->hw.hw_addr + rx_ring->head);
2180         writel(0, adapter->hw.hw_addr + rx_ring->tail);
2181 }
2182
2183 /**
2184  * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2185  * @adapter: board private structure
2186  **/
2187 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2188 {
2189         int i;
2190
2191         for (i = 0; i < adapter->num_rx_queues; i++)
2192                 igb_clean_rx_ring(&adapter->rx_ring[i]);
2193 }
2194
2195 /**
2196  * igb_set_mac - Change the Ethernet Address of the NIC
2197  * @netdev: network interface device structure
2198  * @p: pointer to an address structure
2199  *
2200  * Returns 0 on success, negative on failure
2201  **/
2202 static int igb_set_mac(struct net_device *netdev, void *p)
2203 {
2204         struct igb_adapter *adapter = netdev_priv(netdev);
2205         struct sockaddr *addr = p;
2206
2207         if (!is_valid_ether_addr(addr->sa_data))
2208                 return -EADDRNOTAVAIL;
2209
2210         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2211         memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
2212
2213         adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2214
2215         return 0;
2216 }
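/* This is reached through the stack's dev_set_mac_address() path, e.g. when
 * an administrator runs something like "ip link set eth0 address
 * 00:11:22:33:44:55" (interface name and address here are illustrative). */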
2217
2218 /**
2219  * igb_set_multi - Multicast and Promiscuous mode set
2220  * @netdev: network interface device structure
2221  *
2222  * The set_multi entry point is called whenever the multicast address
2223  * list or the network interface flags are updated.  This routine is
2224  * responsible for configuring the hardware for proper multicast,
2225  * promiscuous mode, and all-multi behavior.
2226  **/
2227 static void igb_set_multi(struct net_device *netdev)
2228 {
2229         struct igb_adapter *adapter = netdev_priv(netdev);
2230         struct e1000_hw *hw = &adapter->hw;
2231         struct e1000_mac_info *mac = &hw->mac;
2232         struct dev_mc_list *mc_ptr;
2233         u8  *mta_list;
2234         u32 rctl;
2235         int i;
2236
2237         /* Check for Promiscuous and All Multicast modes */
2238
2239         rctl = rd32(E1000_RCTL);
2240
2241         if (netdev->flags & IFF_PROMISC) {
2242                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2243                 rctl &= ~E1000_RCTL_VFE;
2244         } else {
2245                 if (netdev->flags & IFF_ALLMULTI) {
2246                         rctl |= E1000_RCTL_MPE;
2247                         rctl &= ~E1000_RCTL_UPE;
2248                 } else
2249                         rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2250                 rctl |= E1000_RCTL_VFE;
2251         }
2252         wr32(E1000_RCTL, rctl);
2253
2254         if (!netdev->mc_count) {
2255                 /* nothing to program, so clear mc list */
2256                 igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
2257                                           mac->rar_entry_count);
2258                 return;
2259         }
2260
2261         mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
2262         if (!mta_list)
2263                 return;
2264
2265         /* The shared function expects a packed array of only addresses. */
2266         mc_ptr = netdev->mc_list;
2267
2268         for (i = 0; i < netdev->mc_count; i++) {
2269                 if (!mc_ptr)
2270                         break;
2271                 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2272                 mc_ptr = mc_ptr->next;
2273         }
2274         igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
2275                                       mac->rar_entry_count);
2276         kfree(mta_list);
2277 }
2278
2279 /* Need to wait a few seconds after link up to get diagnostic information from
2280  * the phy */
2281 static void igb_update_phy_info(unsigned long data)
2282 {
2283         struct igb_adapter *adapter = (struct igb_adapter *) data;
2284         igb_get_phy_info(&adapter->hw);
2285 }
2286
2287 /**
2288  * igb_watchdog - Timer Call-back
2289  * @data: pointer to adapter cast into an unsigned long
2290  **/
2291 static void igb_watchdog(unsigned long data)
2292 {
2293         struct igb_adapter *adapter = (struct igb_adapter *)data;
2294         /* Do the rest outside of interrupt context */
2295         schedule_work(&adapter->watchdog_task);
2296 }
2297
2298 static void igb_watchdog_task(struct work_struct *work)
2299 {
2300         struct igb_adapter *adapter = container_of(work,
2301                                         struct igb_adapter, watchdog_task);
2302         struct e1000_hw *hw = &adapter->hw;
2303
2304         struct net_device *netdev = adapter->netdev;
2305         struct igb_ring *tx_ring = adapter->tx_ring;
2306         struct e1000_mac_info *mac = &adapter->hw.mac;
2307         u32 link;
2308         u32 eics = 0;
2309         s32 ret_val;
2310         int i;
2311
2312         if ((netif_carrier_ok(netdev)) &&
2313             (rd32(E1000_STATUS) & E1000_STATUS_LU))
2314                 goto link_up;
2315
2316         ret_val = hw->mac.ops.check_for_link(&adapter->hw);
2317         if ((ret_val == E1000_ERR_PHY) &&
2318             (hw->phy.type == e1000_phy_igp_3) &&
2319             (rd32(E1000_CTRL) &
2320              E1000_PHY_CTRL_GBE_DISABLE))
2321                 dev_info(&adapter->pdev->dev,
2322                          "Gigabit has been disabled, downgrading speed\n");
2323
2324         if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
2325             !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
2326                 link = mac->serdes_has_link;
2327         else
2328                 link = rd32(E1000_STATUS) &
2329                                       E1000_STATUS_LU;
2330
2331         if (link) {
2332                 if (!netif_carrier_ok(netdev)) {
2333                         u32 ctrl;
2334                         hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2335                                                    &adapter->link_speed,
2336                                                    &adapter->link_duplex);
2337
2338                         ctrl = rd32(E1000_CTRL);
2339                         /* Link status messages must follow this format */
2340                         printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2341                                  "Flow Control: %s\n",
2342                                  netdev->name,
2343                                  adapter->link_speed,
2344                                  adapter->link_duplex == FULL_DUPLEX ?
2345                                  "Full Duplex" : "Half Duplex",
2346                                  ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2347                                  E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2348                                  E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2349                                  E1000_CTRL_TFCE) ? "TX" : "None")));
2350
2351                         /* tweak tx_queue_len according to speed/duplex and
2352                          * adjust the timeout factor */
2353                         netdev->tx_queue_len = adapter->tx_queue_len;
2354                         adapter->tx_timeout_factor = 1;
2355                         switch (adapter->link_speed) {
2356                         case SPEED_10:
2357                                 netdev->tx_queue_len = 10;
2358                                 adapter->tx_timeout_factor = 14;
2359                                 break;
2360                         case SPEED_100:
2361                                 netdev->tx_queue_len = 100;
2362                                 /* maybe add some timeout factor ? */
2363                                 break;
2364                         }
2365
2366                         netif_carrier_on(netdev);
2367                         netif_tx_wake_all_queues(netdev);
2368
2369                         if (!test_bit(__IGB_DOWN, &adapter->state))
2370                                 mod_timer(&adapter->phy_info_timer,
2371                                           round_jiffies(jiffies + 2 * HZ));
2372                 }
2373         } else {
2374                 if (netif_carrier_ok(netdev)) {
2375                         adapter->link_speed = 0;
2376                         adapter->link_duplex = 0;
2377                         /* Link status messages must follow this format */
2378                         printk(KERN_INFO "igb: %s NIC Link is Down\n",
2379                                netdev->name);
2380                         netif_carrier_off(netdev);
2381                         netif_tx_stop_all_queues(netdev);
2382                         if (!test_bit(__IGB_DOWN, &adapter->state))
2383                                 mod_timer(&adapter->phy_info_timer,
2384                                           round_jiffies(jiffies + 2 * HZ));
2385                 }
2386         }
2387
2388 link_up:
2389         igb_update_stats(adapter);
2390
2391         mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2392         adapter->tpt_old = adapter->stats.tpt;
2393         mac->collision_delta = adapter->stats.colc - adapter->colc_old;
2394         adapter->colc_old = adapter->stats.colc;
2395
2396         adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2397         adapter->gorc_old = adapter->stats.gorc;
2398         adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2399         adapter->gotc_old = adapter->stats.gotc;
2400
2401         igb_update_adaptive(&adapter->hw);
2402
2403         if (!netif_carrier_ok(netdev)) {
2404                 if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
2405                         /* We've lost link, so the controller stops DMA,
2406                          * but we've got queued Tx work that's never going
2407                          * to get done, so reset controller to flush Tx.
2408                          * (Do the reset outside of interrupt context). */
2409                         adapter->tx_timeout_count++;
2410                         schedule_work(&adapter->reset_task);
2411                 }
2412         }
2413
2414         /* Cause software interrupt to ensure rx ring is cleaned */
2415         if (adapter->msix_entries) {
2416                 for (i = 0; i < adapter->num_rx_queues; i++)
2417                         eics |= adapter->rx_ring[i].eims_value;
2418                 wr32(E1000_EICS, eics);
2419         } else {
2420                 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2421         }
2422
2423         /* Force detection of hung controller every watchdog period */
2424         tx_ring->detect_tx_hung = true;
2425
2426         /* Reset the timer */
2427         if (!test_bit(__IGB_DOWN, &adapter->state))
2428                 mod_timer(&adapter->watchdog_timer,
2429                           round_jiffies(jiffies + 2 * HZ));
2430 }
2431
2432 enum latency_range {
2433         lowest_latency = 0,
2434         low_latency = 1,
2435         bulk_latency = 2,
2436         latency_invalid = 255
2437 };
2438
2439
2440 /**
2441  * igb_update_ring_itr - update the dynamic ITR value based on packet size
2442  *
2443  *      Stores a new ITR value based strictly on packet size.  This
2444  *      algorithm is less sophisticated than that used in igb_update_itr,
2445  *      due to the difficulty of synchronizing statistics across multiple
2446  *      receive rings.  The divisors and thresholds used by this function
2447  *      were determined based on theoretical maximum wire speed and testing
2448  *      data, in order to minimize response time while increasing bulk
2449  *      throughput.
2450  *      This functionality is controlled by the InterruptThrottleRate module
2451  *      parameter (see igb_param.c)
2452  *      NOTE:  This function is called only when operating in a multiqueue
2453  *             receive environment.
2454  * @rx_ring: pointer to ring
2455  **/
2456 static void igb_update_ring_itr(struct igb_ring *rx_ring)
2457 {
2458         int new_val = rx_ring->itr_val;
2459         int avg_wire_size = 0;
2460         struct igb_adapter *adapter = rx_ring->adapter;
2461
2462         if (!rx_ring->total_packets)
2463                 goto clear_counts; /* no packets, so don't do anything */
2464
2465         /* For non-gigabit speeds, just fix the interrupt rate at 4000
2466          * ints/sec - ITR timer value of 120 ticks.
2467          */
2468         if (adapter->link_speed != SPEED_1000) {
2469                 new_val = 120;
2470                 goto set_itr_val;
2471         }
2472         avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
2473
2474         /* Add 24 bytes to size to account for CRC, preamble, and gap */
2475         avg_wire_size += 24;
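        /* The 24 bytes of per-frame overhead break down as 4 bytes of CRC,
         * 8 bytes of preamble/SFD, and 12 bytes of inter-frame gap. */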
2476
2477         /* Don't starve jumbo frames */
2478         avg_wire_size = min(avg_wire_size, 3000);
2479
2480         /* Give a little boost to mid-size frames */
2481         if ((avg_wire_size > 300) && (avg_wire_size < 1200))
2482                 new_val = avg_wire_size / 3;
2483         else
2484                 new_val = avg_wire_size / 2;
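        /* e.g. a 600-byte average gets the mid-size boost (600 / 3 = 200)
         * while a 1500-byte average lands at 1500 / 2 = 750, trading some
         * latency for fewer interrupts on bulk traffic. */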
2485
2486 set_itr_val:
2487         if (new_val != rx_ring->itr_val) {
2488                 rx_ring->itr_val = new_val;
2489                 rx_ring->set_itr = 1;
2490         }
2491 clear_counts:
2492         rx_ring->total_bytes = 0;
2493         rx_ring->total_packets = 0;
2494 }
2495
2496 /**
2497  * igb_update_itr - update the dynamic ITR value based on statistics
2498  *      Stores a new ITR value based on packets and byte
2499  *      counts during the last interrupt.  The advantage of per interrupt
2500  *      computation is faster updates and more accurate ITR for the current
2501  *      traffic pattern.  Constants in this function were computed
2502  *      based on theoretical maximum wire speed and thresholds were set based
2503  *      on testing data as well as attempting to minimize response time
2504  *      while increasing bulk throughput.
2505  *      This functionality is controlled by the InterruptThrottleRate module
2506  *      parameter (see igb_param.c)
2507  *      NOTE:  These calculations are only valid when operating in a single-
2508  *             queue environment.
2509  * @adapter: pointer to adapter
2510  * @itr_setting: current adapter->itr
2511  * @packets: the number of packets during this measurement interval
2512  * @bytes: the number of bytes during this measurement interval
2513  **/
2514 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2515                                    int packets, int bytes)
2516 {
2517         unsigned int retval = itr_setting;
2518
2519         if (packets == 0)
2520                 goto update_itr_done;
2521
2522         switch (itr_setting) {
2523         case lowest_latency:
2524                 /* handle TSO and jumbo frames */
2525                 if (bytes/packets > 8000)
2526                         retval = bulk_latency;
2527                 else if ((packets < 5) && (bytes > 512))
2528                         retval = low_latency;
2529                 break;
2530         case low_latency:  /* 50 usec aka 20000 ints/s */
2531                 if (bytes > 10000) {
2532                         /* this if handles the TSO accounting */
2533                         if (bytes/packets > 8000) {
2534                                 retval = bulk_latency;
2535                         } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2536                                 retval = bulk_latency;
2537                         } else if (packets > 35) {
2538                                 retval = lowest_latency;
2539                         }
2540                 } else if (bytes/packets > 2000) {
2541                         retval = bulk_latency;
2542                 } else if (packets <= 2 && bytes < 512) {
2543                         retval = lowest_latency;
2544                 }
2545                 break;
2546         case bulk_latency: /* 250 usec aka 4000 ints/s */
2547                 if (bytes > 25000) {
2548                         if (packets > 35)
2549                                 retval = low_latency;
2550                 } else if (bytes < 6000) {
2551                         retval = low_latency;
2552                 }
2553                 break;
2554         }
2555
2556 update_itr_done:
2557         return retval;
2558 }
2559
2560 static void igb_set_itr(struct igb_adapter *adapter)
2561 {
2562         u16 current_itr;
2563         u32 new_itr = adapter->itr;
2564
2565         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2566         if (adapter->link_speed != SPEED_1000) {
2567                 current_itr = 0;
2568                 new_itr = 4000;
2569                 goto set_itr_now;
2570         }
2571
2572         adapter->rx_itr = igb_update_itr(adapter,
2573                                     adapter->rx_itr,
2574                                     adapter->rx_ring->total_packets,
2575                                     adapter->rx_ring->total_bytes);
2576
2577         if (adapter->rx_ring->buddy) {
2578                 adapter->tx_itr = igb_update_itr(adapter,
2579                                             adapter->tx_itr,
2580                                             adapter->tx_ring->total_packets,
2581                                             adapter->tx_ring->total_bytes);
2582
2583                 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2584         } else {
2585                 current_itr = adapter->rx_itr;
2586         }
2587
2588         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2589         if (adapter->itr_setting == 3 &&
2590             current_itr == lowest_latency)
2591                 current_itr = low_latency;
2592
2593         switch (current_itr) {
2594         /* counts and packets in update_itr are dependent on these numbers */
2595         case lowest_latency:
2596                 new_itr = 70000;
2597                 break;
2598         case low_latency:
2599                 new_itr = 20000; /* aka hwitr = ~200 */
2600                 break;
2601         case bulk_latency:
2602                 new_itr = 4000;
2603                 break;
2604         default:
2605                 break;
2606         }
2607
2608 set_itr_now:
2609         adapter->rx_ring->total_bytes = 0;
2610         adapter->rx_ring->total_packets = 0;
2611         if (adapter->rx_ring->buddy) {
2612                 adapter->rx_ring->buddy->total_bytes = 0;
2613                 adapter->rx_ring->buddy->total_packets = 0;
2614         }
2615
2616         if (new_itr != adapter->itr) {
2617                 /* this attempts to bias the interrupt rate towards Bulk
2618                  * by adding intermediate steps when interrupt rate is
2619                  * increasing */
2620                 new_itr = new_itr > adapter->itr ?
2621                              min(adapter->itr + (new_itr >> 2), new_itr) :
2622                              new_itr;
2623                 /* Don't write the value here; it resets the adapter's
2624                  * internal timer, and causes us to delay far longer than
2625                  * we should between interrupts.  Instead, we write the ITR
2626                  * value at the beginning of the next interrupt so the timing
2627                  * ends up being correct.
2628                  */
2629                 adapter->itr = new_itr;
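                /* Convert the interrupts/sec target into the 256 ns
                 * interval units the formula below implies: e.g.
                 * 20000 ints/s -> 10^9 / (20000 * 256) ~= 195, i.e. one
                 * interrupt roughly every 50 us. */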
2630                 adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
2631                 adapter->rx_ring->set_itr = 1;
2632         }
2633
2634         return;
2635 }
2636
2637
2638 #define IGB_TX_FLAGS_CSUM               0x00000001
2639 #define IGB_TX_FLAGS_VLAN               0x00000002
2640 #define IGB_TX_FLAGS_TSO                0x00000004
2641 #define IGB_TX_FLAGS_IPV4               0x00000008
2642 #define IGB_TX_FLAGS_VLAN_MASK  0xffff0000
2643 #define IGB_TX_FLAGS_VLAN_SHIFT 16
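/* The low tx_flags bits are feature booleans; the VLAN tag itself is packed
 * into the top 16 bits, so callers OR in (tag << IGB_TX_FLAGS_VLAN_SHIFT)
 * and the descriptor-building code below masks it back out with
 * IGB_TX_FLAGS_VLAN_MASK. */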
2644
2645 static inline int igb_tso_adv(struct igb_adapter *adapter,
2646                               struct igb_ring *tx_ring,
2647                               struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2648 {
2649         struct e1000_adv_tx_context_desc *context_desc;
2650         unsigned int i;
2651         int err;
2652         struct igb_buffer *buffer_info;
2653         u32 info = 0, tu_cmd = 0;
2654         u32 mss_l4len_idx, l4len;
2655         *hdr_len = 0;
2656
2657         if (skb_header_cloned(skb)) {
2658                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2659                 if (err)
2660                         return err;
2661         }
2662
2663         l4len = tcp_hdrlen(skb);
2664         *hdr_len += l4len;
2665
2666         if (skb->protocol == htons(ETH_P_IP)) {
2667                 struct iphdr *iph = ip_hdr(skb);
2668                 iph->tot_len = 0;
2669                 iph->check = 0;
2670                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2671                                                          iph->daddr, 0,
2672                                                          IPPROTO_TCP,
2673                                                          0);
2674         } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2675                 ipv6_hdr(skb)->payload_len = 0;
2676                 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2677                                                        &ipv6_hdr(skb)->daddr,
2678                                                        0, IPPROTO_TCP, 0);
2679         }
2680
2681         i = tx_ring->next_to_use;
2682
2683         buffer_info = &tx_ring->buffer_info[i];
2684         context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2685         /* VLAN MACLEN IPLEN */
2686         if (tx_flags & IGB_TX_FLAGS_VLAN)
2687                 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2688         info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2689         *hdr_len += skb_network_offset(skb);
2690         info |= skb_network_header_len(skb);
2691         *hdr_len += skb_network_header_len(skb);
2692         context_desc->vlan_macip_lens = cpu_to_le32(info);
2693
2694         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2695         tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2696
2697         if (skb->protocol == htons(ETH_P_IP))
2698                 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2699         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2700
2701         context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2702
2703         /* MSS L4LEN IDX */
2704         mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2705         mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
2706
2707         /* Context index must be unique per ring. */
2708         if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2709                 mss_l4len_idx |= tx_ring->queue_index << 4;
2710
2711         context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2712         context_desc->seqnum_seed = 0;
2713
2714         buffer_info->time_stamp = jiffies;
2715         buffer_info->next_to_watch = i;
2716         buffer_info->dma = 0;
2717         i++;
2718         if (i == tx_ring->count)
2719                 i = 0;
2720
2721         tx_ring->next_to_use = i;
2722
2723         return true;
2724 }
2725
2726 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2727                                         struct igb_ring *tx_ring,
2728                                         struct sk_buff *skb, u32 tx_flags)
2729 {
2730         struct e1000_adv_tx_context_desc *context_desc;
2731         unsigned int i;
2732         struct igb_buffer *buffer_info;
2733         u32 info = 0, tu_cmd = 0;
2734
2735         if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
2736             (tx_flags & IGB_TX_FLAGS_VLAN)) {
2737                 i = tx_ring->next_to_use;
2738                 buffer_info = &tx_ring->buffer_info[i];
2739                 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2740
2741                 if (tx_flags & IGB_TX_FLAGS_VLAN)
2742                         info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2743                 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2744                 if (skb->ip_summed == CHECKSUM_PARTIAL)
2745                         info |= skb_network_header_len(skb);
2746
2747                 context_desc->vlan_macip_lens = cpu_to_le32(info);
2748
2749                 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2750
2751                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2752                         switch (skb->protocol) {
2753                         case __constant_htons(ETH_P_IP):
2754                                 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2755                                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2756                                         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2757                                 break;
2758                         case __constant_htons(ETH_P_IPV6):
2759                                 /* XXX what about other V6 headers?? */
2760                                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2761                                         tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2762                                 break;
2763                         default:
2764                                 if (unlikely(net_ratelimit()))
2765                                         dev_warn(&adapter->pdev->dev,
2766                                             "partial checksum but proto=%x!\n",
2767                                             skb->protocol);
2768                                 break;
2769                         }
2770                 }
2771
2772                 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2773                 context_desc->seqnum_seed = 0;
2774                 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2775                         context_desc->mss_l4len_idx =
2776                                 cpu_to_le32(tx_ring->queue_index << 4);
2777
2778                 buffer_info->time_stamp = jiffies;
2779                 buffer_info->next_to_watch = i;
2780                 buffer_info->dma = 0;
2781
2782                 i++;
2783                 if (i == tx_ring->count)
2784                         i = 0;
2785                 tx_ring->next_to_use = i;
2786
2787                 return true;
2788         }
2789
2790
2791         return false;
2792 }
2793
2794 #define IGB_MAX_TXD_PWR 16
2795 #define IGB_MAX_DATA_PER_TXD    (1<<IGB_MAX_TXD_PWR)
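/* 1 << 16 is 65536, so a single data descriptor can carry just under 64 KB;
 * igb_tx_map_adv() BUG()s if any one mapped segment reaches that limit. */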
2796
2797 static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2798                                  struct igb_ring *tx_ring, struct sk_buff *skb,
2799                                  unsigned int first)
2800 {
2801         struct igb_buffer *buffer_info;
2802         unsigned int len = skb_headlen(skb);
2803         unsigned int count = 0, i;
2804         unsigned int f;
2805
2806         i = tx_ring->next_to_use;
2807
2808         buffer_info = &tx_ring->buffer_info[i];
2809         BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2810         buffer_info->length = len;
2811         /* set time_stamp *before* dma to help avoid a possible race */
2812         buffer_info->time_stamp = jiffies;
2813         buffer_info->next_to_watch = i;
2814         buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
2815                                           PCI_DMA_TODEVICE);
2816         count++;
2817         i++;
2818         if (i == tx_ring->count)
2819                 i = 0;
2820
2821         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2822                 struct skb_frag_struct *frag;
2823
2824                 frag = &skb_shinfo(skb)->frags[f];
2825                 len = frag->size;
2826
2827                 buffer_info = &tx_ring->buffer_info[i];
2828                 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2829                 buffer_info->length = len;
2830                 buffer_info->time_stamp = jiffies;
2831                 buffer_info->next_to_watch = i;
2832                 buffer_info->dma = pci_map_page(adapter->pdev,
2833                                                 frag->page,
2834                                                 frag->page_offset,
2835                                                 len,
2836                                                 PCI_DMA_TODEVICE);
2837
2838                 count++;
2839                 i++;
2840                 if (i == tx_ring->count)
2841                         i = 0;
2842         }
2843
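        /* step back to the last descriptor actually used: that entry gets
         * the skb pointer, and "first" is pointed at it so the cleanup path
         * knows which write-back marks the whole packet as done */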
2844         i = ((i == 0) ? tx_ring->count - 1 : i - 1);
2845         tx_ring->buffer_info[i].skb = skb;
2846         tx_ring->buffer_info[first].next_to_watch = i;
2847
2848         return count;
2849 }
2850
2851 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
2852                                     struct igb_ring *tx_ring,
2853                                     int tx_flags, int count, u32 paylen,
2854                                     u8 hdr_len)
2855 {
2856         union e1000_adv_tx_desc *tx_desc = NULL;
2857         struct igb_buffer *buffer_info;
2858         u32 olinfo_status = 0, cmd_type_len;
2859         unsigned int i;
2860
2861         cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2862                         E1000_ADVTXD_DCMD_DEXT);
2863
2864         if (tx_flags & IGB_TX_FLAGS_VLAN)
2865                 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2866
2867         if (tx_flags & IGB_TX_FLAGS_TSO) {
2868                 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2869
2870                 /* insert tcp checksum */
2871                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2872
2873                 /* insert ip checksum */
2874                 if (tx_flags & IGB_TX_FLAGS_IPV4)
2875                         olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2876
2877         } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
2878                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2879         }
2880
2881         if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
2882             (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
2883                          IGB_TX_FLAGS_VLAN)))
2884                 olinfo_status |= tx_ring->queue_index << 4;
2885
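        /* PAYLEN is the payload excluding the TSO header; hdr_len is 0 on
         * the non-TSO paths, so it is simply skb->len there */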
2886         olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2887
2888         i = tx_ring->next_to_use;
2889         while (count--) {
2890                 buffer_info = &tx_ring->buffer_info[i];
2891                 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
2892                 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2893                 tx_desc->read.cmd_type_len =
2894                         cpu_to_le32(cmd_type_len | buffer_info->length);
2895                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2896                 i++;
2897                 if (i == tx_ring->count)
2898                         i = 0;
2899         }
2900
2901         tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2902         /* Force memory writes to complete before letting h/w
2903          * know there are new descriptors to fetch.  (Only
2904          * applicable for weak-ordered memory model archs,
2905          * such as IA-64). */
2906         wmb();
2907
2908         tx_ring->next_to_use = i;
2909         writel(i, adapter->hw.hw_addr + tx_ring->tail);
2910         /* we need this if more than one processor can write to our tail
2911          * at a time; it synchronizes IO on IA64/Altix systems */
2912         mmiowb();
2913 }
2914
2915 static int __igb_maybe_stop_tx(struct net_device *netdev,
2916                                struct igb_ring *tx_ring, int size)
2917 {
2918         struct igb_adapter *adapter = netdev_priv(netdev);
2919
2920         netif_stop_subqueue(netdev, tx_ring->queue_index);
2921
2922         /* Herbert's original patch had:
2923          *  smp_mb__after_netif_stop_queue();
2924          * but since that doesn't exist yet, just open code it. */
2925         smp_mb();
2926
2927         /* We need to check again in case another CPU has just
2928          * made room available. */
2929         if (IGB_DESC_UNUSED(tx_ring) < size)
2930                 return -EBUSY;
2931
2932         /* A reprieve! */
2933         netif_wake_subqueue(netdev, tx_ring->queue_index);
2934         ++adapter->restart_queue;
2935         return 0;
2936 }
2937
2938 static int igb_maybe_stop_tx(struct net_device *netdev,
2939                              struct igb_ring *tx_ring, int size)
2940 {
2941         if (IGB_DESC_UNUSED(tx_ring) >= size)
2942                 return 0;
2943         return __igb_maybe_stop_tx(netdev, tx_ring, size);
2944 }
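/* A minimal sketch of the lock-free stop/wake protocol used above
 * (producer side), assuming igb_clean_tx_irq() performs the mirror-image
 * smp_mb() before re-checking __netif_subqueue_stopped():
 *
 *      netif_stop_subqueue(netdev, q);
 *      smp_mb();                               // make the stop visible first
 *      if (IGB_DESC_UNUSED(ring) >= size)
 *              netif_wake_subqueue(netdev, q); // the cleaner freed room
 */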
2945
2946 #define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
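/* Worked example: TXD_USE_COUNT(1500) = (1500 >> 16) + 1 = 1 descriptor,
 * while TXD_USE_COUNT(70000) = (70000 >> 16) + 1 = 2. */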
2947
2948 static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2949                                    struct net_device *netdev,
2950                                    struct igb_ring *tx_ring)
2951 {
2952         struct igb_adapter *adapter = netdev_priv(netdev);
2953         unsigned int first;
2954         unsigned int tx_flags = 0;
2955         unsigned int len;
2956         u8 hdr_len = 0;
2957         int tso = 0;
2958
2959         len = skb_headlen(skb);
2960
2961         if (test_bit(__IGB_DOWN, &adapter->state)) {
2962                 dev_kfree_skb_any(skb);
2963                 return NETDEV_TX_OK;
2964         }
2965
2966         if (skb->len <= 0) {
2967                 dev_kfree_skb_any(skb);
2968                 return NETDEV_TX_OK;
2969         }
2970
2971         /* need: 1 descriptor per page,
2972          *       + 2 desc gap to keep tail from touching head,
2973          *       + 1 desc for skb->data,
2974          *       + 1 desc for context descriptor,
2975          * otherwise try next time */
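        /* e.g. an skb with 3 page fragments needs 3 + 1 + 1 + 2 = 7 free
         * descriptors, hence the nr_frags + 4 below */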
2976         if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
2977                 /* this is a hard error */
2978                 return NETDEV_TX_BUSY;
2979         }
2980         skb_orphan(skb);
2981
2982         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2983                 tx_flags |= IGB_TX_FLAGS_VLAN;
2984                 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
2985         }
2986
2987         if (skb->protocol == htons(ETH_P_IP))
2988                 tx_flags |= IGB_TX_FLAGS_IPV4;
2989
2990         first = tx_ring->next_to_use;
2991
2992         tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
2993                                               &hdr_len) : 0;
2994
2995         if (tso < 0) {
2996                 dev_kfree_skb_any(skb);
2997                 return NETDEV_TX_OK;
2998         }
2999
3000         if (tso)
3001                 tx_flags |= IGB_TX_FLAGS_TSO;
3002         else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
3003                  (skb->ip_summed == CHECKSUM_PARTIAL))
3004                 tx_flags |= IGB_TX_FLAGS_CSUM;
3005
3006         igb_tx_queue_adv(adapter, tx_ring, tx_flags,
3007                          igb_tx_map_adv(adapter, tx_ring, skb, first),
3008                          skb->len, hdr_len);
3009
3010         netdev->trans_start = jiffies;
3011
3012         /* Make sure there is space in the ring for the next send. */
3013         igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3014
3015         return NETDEV_TX_OK;
3016 }
3017
3018 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
3019 {
3020         struct igb_adapter *adapter = netdev_priv(netdev);
3021         struct igb_ring *tx_ring;
3022         int r_idx;
3023
3024         r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
3025         tx_ring = adapter->multi_tx_table[r_idx];
3026
3027         /* This goes back to the question of how to logically map a tx queue
3028          * to a flow.  Right now, performance is impacted slightly negatively
3029          * if using multiple tx queues.  If the stack breaks away from a
3030          * single qdisc implementation, we can look at this again. */
3031         return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
3032 }
3033
3034 /**
3035  * igb_tx_timeout - Respond to a Tx Hang
3036  * @netdev: network interface device structure
3037  **/
3038 static void igb_tx_timeout(struct net_device *netdev)
3039 {
3040         struct igb_adapter *adapter = netdev_priv(netdev);
3041         struct e1000_hw *hw = &adapter->hw;
3042
3043         /* Do the reset outside of interrupt context */
3044         adapter->tx_timeout_count++;
3045         schedule_work(&adapter->reset_task);
3046         wr32(E1000_EICS, adapter->eims_enable_mask &
3047                 ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
3048 }
3049
3050 static void igb_reset_task(struct work_struct *work)
3051 {
3052         struct igb_adapter *adapter;
3053         adapter = container_of(work, struct igb_adapter, reset_task);
3054
3055         igb_reinit_locked(adapter);
3056 }
3057
3058 /**
3059  * igb_get_stats - Get System Network Statistics
3060  * @netdev: network interface device structure
3061  *
3062  * Returns the address of the device statistics structure.
3063  * The statistics are actually updated from the timer callback.
3064  **/
3065 static struct net_device_stats *
3066 igb_get_stats(struct net_device *netdev)
3067 {
3068         struct igb_adapter *adapter = netdev_priv(netdev);
3069
3070         /* only return the current stats */
3071         return &adapter->net_stats;
3072 }
3073
3074 /**
3075  * igb_change_mtu - Change the Maximum Transmission Unit
3076  * @netdev: network interface device structure
3077  * @new_mtu: new value for maximum frame size
3078  *
3079  * Returns 0 on success, negative on failure
3080  **/
3081 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3082 {
3083         struct igb_adapter *adapter = netdev_priv(netdev);
3084         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3085
3086         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3087             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3088                 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3089                 return -EINVAL;
3090         }
3091
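/* 9234 bytes = 9216-byte MTU + 14-byte Ethernet header + 4-byte FCS,
 * hence the "MTU > 9216" wording in the message below. */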
3092 #define MAX_STD_JUMBO_FRAME_SIZE 9234
3093         if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3094                 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3095                 return -EINVAL;
3096         }
3097
3098         while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3099                 msleep(1);
3100         /* igb_down has a dependency on max_frame_size */
3101         adapter->max_frame_size = max_frame;
3102         if (netif_running(netdev))
3103                 igb_down(adapter);
3104
3105         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3106          * means we reserve 2 more, this pushes us to allocate from the next
3107          * larger slab size.
3108          * i.e. RXBUFFER_2048 --> size-4096 slab
3109          */
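        /* e.g. a 2048-byte buffer becomes 2048 + 16 + 2 = 2066 bytes of
         * skb data, which the slab allocator rounds up to a size-4096
         * object */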
3110
3111         if (max_frame <= IGB_RXBUFFER_256)
3112                 adapter->rx_buffer_len = IGB_RXBUFFER_256;
3113         else if (max_frame <= IGB_RXBUFFER_512)
3114                 adapter->rx_buffer_len = IGB_RXBUFFER_512;
3115         else if (max_frame <= IGB_RXBUFFER_1024)
3116                 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3117         else if (max_frame <= IGB_RXBUFFER_2048)
3118                 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3119         else
3120 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3121                 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3122 #else
3123                 adapter->rx_buffer_len = PAGE_SIZE / 2;
3124 #endif
3125         /* adjust allocation if LPE protects us, and we aren't using SBP */
3126         if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3127              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3128                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3129
3130         dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3131                  netdev->mtu, new_mtu);
3132         netdev->mtu = new_mtu;
3133
3134         if (netif_running(netdev))
3135                 igb_up(adapter);
3136         else
3137                 igb_reset(adapter);
3138
3139         clear_bit(__IGB_RESETTING, &adapter->state);
3140
3141         return 0;
3142 }
3143
3144 /**
3145  * igb_update_stats - Update the board statistics counters
3146  * @adapter: board private structure
3147  **/
3148
3149 void igb_update_stats(struct igb_adapter *adapter)
3150 {
3151         struct e1000_hw *hw = &adapter->hw;
3152         struct pci_dev *pdev = adapter->pdev;
3153         u16 phy_tmp;
3154
3155 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3156
3157         /*
3158          * Prevent stats update while adapter is being reset, or if the pci
3159          * connection is down.
3160          */
3161         if (adapter->link_speed == 0)
3162                 return;
3163         if (pci_channel_offline(pdev))
3164                 return;
3165
3166         adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3167         adapter->stats.gprc += rd32(E1000_GPRC);
3168         adapter->stats.gorc += rd32(E1000_GORCL);
3169         rd32(E1000_GORCH); /* clear GORCL */
3170         adapter->stats.bprc += rd32(E1000_BPRC);
3171         adapter->stats.mprc += rd32(E1000_MPRC);
3172         adapter->stats.roc += rd32(E1000_ROC);
3173
3174         adapter->stats.prc64 += rd32(E1000_PRC64);
3175         adapter->stats.prc127 += rd32(E1000_PRC127);
3176         adapter->stats.prc255 += rd32(E1000_PRC255);
3177         adapter->stats.prc511 += rd32(E1000_PRC511);
3178         adapter->stats.prc1023 += rd32(E1000_PRC1023);
3179         adapter->stats.prc1522 += rd32(E1000_PRC1522);
3180         adapter->stats.symerrs += rd32(E1000_SYMERRS);
3181         adapter->stats.sec += rd32(E1000_SEC);
3182
3183         adapter->stats.mpc += rd32(E1000_MPC);
3184         adapter->stats.scc += rd32(E1000_SCC);
3185         adapter->stats.ecol += rd32(E1000_ECOL);
3186         adapter->stats.mcc += rd32(E1000_MCC);
3187         adapter->stats.latecol += rd32(E1000_LATECOL);
3188         adapter->stats.dc += rd32(E1000_DC);
3189         adapter->stats.rlec += rd32(E1000_RLEC);
3190         adapter->stats.xonrxc += rd32(E1000_XONRXC);
3191         adapter->stats.xontxc += rd32(E1000_XONTXC);
3192         adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3193         adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3194         adapter->stats.fcruc += rd32(E1000_FCRUC);
3195         adapter->stats.gptc += rd32(E1000_GPTC);
3196         adapter->stats.gotc += rd32(E1000_GOTCL);
3197         rd32(E1000_GOTCH); /* clear GOTCL */
3198         adapter->stats.rnbc += rd32(E1000_RNBC);
3199         adapter->stats.ruc += rd32(E1000_RUC);
3200         adapter->stats.rfc += rd32(E1000_RFC);
3201         adapter->stats.rjc += rd32(E1000_RJC);
3202         adapter->stats.tor += rd32(E1000_TORH);
3203         adapter->stats.tot += rd32(E1000_TOTH);
3204         adapter->stats.tpr += rd32(E1000_TPR);
3205
3206         adapter->stats.ptc64 += rd32(E1000_PTC64);
3207         adapter->stats.ptc127 += rd32(E1000_PTC127);
3208         adapter->stats.ptc255 += rd32(E1000_PTC255);
3209         adapter->stats.ptc511 += rd32(E1000_PTC511);
3210         adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3211         adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3212
3213         adapter->stats.mptc += rd32(E1000_MPTC);
3214         adapter->stats.bptc += rd32(E1000_BPTC);
3215
3216         /* used for adaptive IFS */
3217
3218         hw->mac.tx_packet_delta = rd32(E1000_TPT);
3219         adapter->stats.tpt += hw->mac.tx_packet_delta;
3220         hw->mac.collision_delta = rd32(E1000_COLC);
3221         adapter->stats.colc += hw->mac.collision_delta;
3222
3223         adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3224         adapter->stats.rxerrc += rd32(E1000_RXERRC);
3225         adapter->stats.tncrs += rd32(E1000_TNCRS);
3226         adapter->stats.tsctc += rd32(E1000_TSCTC);
3227         adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3228
3229         adapter->stats.iac += rd32(E1000_IAC);
3230         adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3231         adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3232         adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3233         adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3234         adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3235         adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3236         adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3237         adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3238
3239         /* Fill out the OS statistics structure */
3240         adapter->net_stats.multicast = adapter->stats.mprc;
3241         adapter->net_stats.collisions = adapter->stats.colc;
3242
3243         /* Rx Errors */
3244
3245         /* RLEC on some newer hardware can be incorrect so build
3246          * our own version based on RUC and ROC */
3247         adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3248                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3249                 adapter->stats.ruc + adapter->stats.roc +
3250                 adapter->stats.cexterr;
3251         adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3252                                               adapter->stats.roc;
3253         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3254         adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3255         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3256
3257         /* Tx Errors */
3258         adapter->net_stats.tx_errors = adapter->stats.ecol +
3259                                        adapter->stats.latecol;
3260         adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3261         adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3262         adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3263
3264         /* Tx Dropped needs to be maintained elsewhere */
3265
3266         /* Phy Stats */
3267         if (hw->phy.media_type == e1000_media_type_copper) {
3268                 if ((adapter->link_speed == SPEED_1000) &&
3269                    (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
3270                                               &phy_tmp))) {
3271                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3272                         adapter->phy_stats.idle_errors += phy_tmp;
3273                 }
3274         }
3275
3276         /* Management Stats */
3277         adapter->stats.mgptc += rd32(E1000_MGTPTC);
3278         adapter->stats.mgprc += rd32(E1000_MGTPRC);
3279         adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3280 }
3281
3283 static irqreturn_t igb_msix_other(int irq, void *data)
3284 {
3285         struct net_device *netdev = data;
3286         struct igb_adapter *adapter = netdev_priv(netdev);
3287         struct e1000_hw *hw = &adapter->hw;
3288         u32 icr = rd32(E1000_ICR);
3289
3290         /* reading ICR causes bit 31 of EICR to be cleared */
3291         if (!(icr & E1000_ICR_LSC))
3292                 goto no_link_interrupt;
3293         hw->mac.get_link_status = 1;
3294         /* guard against interrupt when we're going down */
3295         if (!test_bit(__IGB_DOWN, &adapter->state))
3296                 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3297
3298 no_link_interrupt:
3299         wr32(E1000_IMS, E1000_IMS_LSC);
3300         wr32(E1000_EIMS, adapter->eims_other);
3301
3302         return IRQ_HANDLED;
3303 }
3304
3305 static irqreturn_t igb_msix_tx(int irq, void *data)
3306 {
3307         struct igb_ring *tx_ring = data;
3308         struct igb_adapter *adapter = tx_ring->adapter;
3309         struct e1000_hw *hw = &adapter->hw;
3310
3311 #ifdef CONFIG_IGB_DCA
3312         if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3313                 igb_update_tx_dca(tx_ring);
3314 #endif
3315         tx_ring->total_bytes = 0;
3316         tx_ring->total_packets = 0;
3317
3318         /* auto mask will automatically reenable the interrupt when we write
3319          * EICS */
3320         if (!igb_clean_tx_irq(tx_ring))
3321                 /* Ring was not completely cleaned, so fire another interrupt */
3322                 wr32(E1000_EICS, tx_ring->eims_value);
3323         else
3324                 wr32(E1000_EIMS, tx_ring->eims_value);
3325
3326         return IRQ_HANDLED;
3327 }
3328
3329 static void igb_write_itr(struct igb_ring *ring)
3330 {
3331         struct e1000_hw *hw = &ring->adapter->hw;
3332         if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3333                 switch (hw->mac.type) {
3334                 case e1000_82576:
3335                         wr32(ring->itr_register,
3336                              ring->itr_val |
3337                              0x80000000);
3338                         break;
3339                 default:
3340                         wr32(ring->itr_register,
3341                              ring->itr_val |
3342                              (ring->itr_val << 16));
3343                         break;
3344                 }
3345                 ring->set_itr = 0;
3346         }
3347 }
3348
3349 static irqreturn_t igb_msix_rx(int irq, void *data)
3350 {
3351         struct igb_ring *rx_ring = data;
3352
3353         /* Write the ITR value calculated at the end of the
3354          * previous interrupt.
3355          */
3356
3357         igb_write_itr(rx_ring);
3358
3359         if (napi_schedule_prep(&rx_ring->napi))
3360                 __napi_schedule(&rx_ring->napi);
3361
3362 #ifdef CONFIG_IGB_DCA
3363         if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3364                 igb_update_rx_dca(rx_ring);
3365 #endif
3366         return IRQ_HANDLED;
3367 }
3368
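/* Direct Cache Access (DCA) lets the chipset steer descriptor and header
 * writes toward the cache of the CPU servicing a ring; the helpers below
 * retarget a ring's DCA hint whenever it is serviced on a new CPU. */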
3369 #ifdef CONFIG_IGB_DCA
3370 static void igb_update_rx_dca(struct igb_ring *rx_ring)
3371 {
3372         u32 dca_rxctrl;
3373         struct igb_adapter *adapter = rx_ring->adapter;
3374         struct e1000_hw *hw = &adapter->hw;
3375         int cpu = get_cpu();
3376         int q = rx_ring->reg_idx;
3377
3378         if (rx_ring->cpu != cpu) {
3379                 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
3380                 if (hw->mac.type == e1000_82576) {
3381                         dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
3382                         dca_rxctrl |= dca_get_tag(cpu) <<
3383                                       E1000_DCA_RXCTRL_CPUID_SHIFT;
3384                 } else {
3385                         dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3386                         dca_rxctrl |= dca_get_tag(cpu);
3387                 }
3388                 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3389                 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3390                 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3391                 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3392                 rx_ring->cpu = cpu;
3393         }
3394         put_cpu();
3395 }
3396
3397 static void igb_update_tx_dca(struct igb_ring *tx_ring)
3398 {
3399         u32 dca_txctrl;
3400         struct igb_adapter *adapter = tx_ring->adapter;
3401         struct e1000_hw *hw = &adapter->hw;
3402         int cpu = get_cpu();
3403         int q = tx_ring->reg_idx;
3404
3405         if (tx_ring->cpu != cpu) {
3406                 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3407                 if (hw->mac.type == e1000_82576) {
3408                         dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3409                         dca_txctrl |= dca_get_tag(cpu) <<
3410                                       E1000_DCA_TXCTRL_CPUID_SHIFT;
3411                 } else {
3412                         dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3413                         dca_txctrl |= dca_get_tag(cpu);
3414                 }
3415                 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3416                 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3417                 tx_ring->cpu = cpu;
3418         }
3419         put_cpu();
3420 }
3421
3422 static void igb_setup_dca(struct igb_adapter *adapter)
3423 {
3424         int i;
3425
3426         if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
3427                 return;
3428
3429         for (i = 0; i < adapter->num_tx_queues; i++) {
3430                 adapter->tx_ring[i].cpu = -1;
3431                 igb_update_tx_dca(&adapter->tx_ring[i]);
3432         }
3433         for (i = 0; i < adapter->num_rx_queues; i++) {
3434                 adapter->rx_ring[i].cpu = -1;
3435                 igb_update_rx_dca(&adapter->rx_ring[i]);
3436         }
3437 }
3438
3439 static int __igb_notify_dca(struct device *dev, void *data)
3440 {
3441         struct net_device *netdev = dev_get_drvdata(dev);
3442         struct igb_adapter *adapter = netdev_priv(netdev);
3443         struct e1000_hw *hw = &adapter->hw;
3444         unsigned long event = *(unsigned long *)data;
3445
3446         if (!(adapter->flags & IGB_FLAG_HAS_DCA))
3447                 goto out;
3448
3449         switch (event) {
3450         case DCA_PROVIDER_ADD:
3451                 /* if already enabled, don't do it again */
3452                 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3453                         break;
3454                 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3455                 /* Always use CB2 mode, difference is masked
3456                  * in the CB driver. */
3457                 wr32(E1000_DCA_CTRL, 2);
3458                 if (dca_add_requester(dev) == 0) {
3459                         dev_info(&adapter->pdev->dev, "DCA enabled\n");
3460                         igb_setup_dca(adapter);
3461                         break;
3462                 }
3463                 /* Fall Through since DCA is disabled. */
3464         case DCA_PROVIDER_REMOVE:
3465                 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3466                         /* without this a class_device is left
3467                          * hanging around in the sysfs model */
3468                         dca_remove_requester(dev);
3469                         dev_info(&adapter->pdev->dev, "DCA disabled\n");
3470                         adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3471                         wr32(E1000_DCA_CTRL, 1);
3472                 }
3473                 break;
3474         }
3475 out:
3476         return 0;
3477 }
3478
3479 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3480                           void *p)
3481 {
3482         int ret_val;
3483
3484         ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
3485                                          __igb_notify_dca);
3486
3487         return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3488 }
3489 #endif /* CONFIG_IGB_DCA */
3490
3491 /**
3492  * igb_intr_msi - Interrupt Handler
3493  * @irq: interrupt number
3494  * @data: pointer to a network interface device structure
3495  **/
3496 static irqreturn_t igb_intr_msi(int irq, void *data)
3497 {
3498         struct net_device *netdev = data;
3499         struct igb_adapter *adapter = netdev_priv(netdev);
3500         struct e1000_hw *hw = &adapter->hw;
3501         /* read ICR disables interrupts using IAM */
3502         u32 icr = rd32(E1000_ICR);
3503
3504         igb_write_itr(adapter->rx_ring);
3505
3506         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3507                 hw->mac.get_link_status = 1;
3508                 if (!test_bit(__IGB_DOWN, &adapter->state))
3509                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3510         }
3511
3512         napi_schedule(&adapter->rx_ring[0].napi);
3513
3514         return IRQ_HANDLED;
3515 }
3516
3517 /**
3518  * igb_intr - Interrupt Handler
3519  * @irq: interrupt number
3520  * @data: pointer to a network interface device structure
3521  **/
3522 static irqreturn_t igb_intr(int irq, void *data)
3523 {
3524         struct net_device *netdev = data;
3525         struct igb_adapter *adapter = netdev_priv(netdev);
3526         struct e1000_hw *hw = &adapter->hw;
3527         /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
3528          * need for the IMC write */
3529         u32 icr = rd32(E1000_ICR);
3530         u32 eicr = 0;
3531         if (!icr)
3532                 return IRQ_NONE;  /* Not our interrupt */
3533
3534         igb_write_itr(adapter->rx_ring);
3535
3536         /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3537          * not set, then the adapter didn't send an interrupt */
3538         if (!(icr & E1000_ICR_INT_ASSERTED))
3539                 return IRQ_NONE;
3540
3541         eicr = rd32(E1000_EICR); /* read to clear any set EICR bits */
3542
3543         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3544                 hw->mac.get_link_status = 1;
3545                 /* guard against interrupt when we're going down */
3546                 if (!test_bit(__IGB_DOWN, &adapter->state))
3547                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3548         }
3549
3550         napi_schedule(&adapter->rx_ring[0].napi);
3551
3552         return IRQ_HANDLED;
3553 }
3554
3555 /**
3556  * igb_poll - NAPI Rx polling callback
3557  * @napi: napi polling structure
3558  * @budget: count of how many packets we should handle
3559  **/
3560 static int igb_poll(struct napi_struct *napi, int budget)
3561 {
3562         struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3563         struct igb_adapter *adapter = rx_ring->adapter;
3564         struct net_device *netdev = adapter->netdev;
3565         int tx_clean_complete, work_done = 0;
3566
3567         /* this poll routine only supports one tx and one rx queue */
3568 #ifdef CONFIG_IGB_DCA
3569         if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3570                 igb_update_tx_dca(&adapter->tx_ring[0]);
3571 #endif
3572         tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
3573
3574 #ifdef CONFIG_IGB_DCA
3575         if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3576                 igb_update_rx_dca(&adapter->rx_ring[0]);
3577 #endif
3578         igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
3579
3580         /* If no Tx and not enough Rx work done, exit the polling mode */
3581         if ((tx_clean_complete && (work_done < budget)) ||
3582             !netif_running(netdev)) {
3583                 if (adapter->itr_setting & 3)
3584                         igb_set_itr(adapter);
3585                 napi_complete(napi);
3586                 if (!test_bit(__IGB_DOWN, &adapter->state))
3587                         igb_irq_enable(adapter);
3588                 return 0;
3589         }
3590
3591         return 1;
3592 }
3593
3594 static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3595 {
3596         struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3597         struct igb_adapter *adapter = rx_ring->adapter;
3598         struct e1000_hw *hw = &adapter->hw;
3599         struct net_device *netdev = adapter->netdev;
3600         int work_done = 0;
3601
3602 #ifdef CONFIG_IGB_DCA
3603         if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3604                 igb_update_rx_dca(rx_ring);
3605 #endif
3606         igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
3607
3609         /* If not enough Rx work done, exit the polling mode */
3610         if ((work_done == 0) || !netif_running(netdev)) {
3611                 napi_complete(napi);
3612
3613                 if (adapter->itr_setting & 3) {
3614                         if (adapter->num_rx_queues == 1)
3615                                 igb_set_itr(adapter);
3616                         else
3617                                 igb_update_ring_itr(rx_ring);
3618                 }
3619
3620                 if (!test_bit(__IGB_DOWN, &adapter->state))
3621                         wr32(E1000_EIMS, rx_ring->eims_value);
3622
3623                 return 0;
3624         }
3625
3626         return 1;
3627 }
3628
3629 /**
3630  * igb_clean_tx_irq - Reclaim resources after transmit completes
3631  * @tx_ring: tx ring to reclaim resources from
3632  * returns true if ring is completely cleaned
3633  **/
3634 static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3635 {
3636         struct igb_adapter *adapter = tx_ring->adapter;
3637         struct net_device *netdev = adapter->netdev;
3638         struct e1000_hw *hw = &adapter->hw;
3639         struct igb_buffer *buffer_info;
3640         struct sk_buff *skb;
3641         union e1000_adv_tx_desc *tx_desc, *eop_desc;
3642         unsigned int total_bytes = 0, total_packets = 0;
3643         unsigned int i, eop, count = 0;
3644         bool cleaned = false;
3645
3646         i = tx_ring->next_to_clean;
3647         eop = tx_ring->buffer_info[i].next_to_watch;
3648         eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
3649
3650         while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3651                (count < tx_ring->count)) {
3652                 for (cleaned = false; !cleaned; count++) {
3653                         tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3654                         buffer_info = &tx_ring->buffer_info[i];
3655                         cleaned = (i == eop);
3656                         skb = buffer_info->skb;
3657
3658                         if (skb) {
3659                                 unsigned int segs, bytecount;
3660                                 /* gso_segs is currently only valid for tcp */
3661                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
3662                                 /* headers repeat in every TSO segment */
3663                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
3664                                             skb->len;
3665                                 total_packets += segs;
3666                                 total_bytes += bytecount;
3667                         }
3668
3669                         igb_unmap_and_free_tx_resource(adapter, buffer_info);
3670                         tx_desc->wb.status = 0;
3671
3672                         i++;
3673                         if (i == tx_ring->count)
3674                                 i = 0;
3675                 }
3676
3677                 eop = tx_ring->buffer_info[i].next_to_watch;
3678                 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
3679         }
3680
3681         tx_ring->next_to_clean = i;
3682
3683         if (unlikely(count &&
3684                      netif_carrier_ok(netdev) &&
3685                      IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
3686                 /* Make sure that anybody stopping the queue after this
3687                  * sees the new next_to_clean.
3688                  */
3689                 smp_mb();
3690                 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
3691                     !(test_bit(__IGB_DOWN, &adapter->state))) {
3692                         netif_wake_subqueue(netdev, tx_ring->queue_index);
3693                         ++adapter->restart_queue;
3694                 }
3695         }
3696
3697         if (tx_ring->detect_tx_hung) {
3698                 /* Detect a transmit hang in hardware, this serializes the
3699                  * check with the clearing of time_stamp and movement of i */
3700                 tx_ring->detect_tx_hung = false;
3701                 if (tx_ring->buffer_info[i].time_stamp &&
3702                     time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
3703                                (adapter->tx_timeout_factor * HZ))
3704                     && !(rd32(E1000_STATUS) &
3705                          E1000_STATUS_TXOFF)) {
3706
3707                         /* detected Tx unit hang */
3708                         dev_err(&adapter->pdev->dev,
3709                                 "Detected Tx Unit Hang\n"
3710                                 "  Tx Queue             <%d>\n"
3711                                 "  TDH                  <%x>\n"
3712                                 "  TDT                  <%x>\n"
3713                                 "  next_to_use          <%x>\n"
3714                                 "  next_to_clean        <%x>\n"
3715                                 "buffer_info[next_to_clean]\n"
3716                                 "  time_stamp           <%lx>\n"
3717                                 "  next_to_watch        <%x>\n"
3718                                 "  jiffies              <%lx>\n"
3719                                 "  desc.status          <%x>\n",
3720                                 tx_ring->queue_index,
3721                                 readl(adapter->hw.hw_addr + tx_ring->head),
3722                                 readl(adapter->hw.hw_addr + tx_ring->tail),
3723                                 tx_ring->next_to_use,
3724                                 tx_ring->next_to_clean,
3725                                 tx_ring->buffer_info[i].time_stamp,
3726                                 eop,
3727                                 jiffies,
3728                                 eop_desc->wb.status);
3729                         netif_stop_subqueue(netdev, tx_ring->queue_index);
3730                 }
3731         }
3732         tx_ring->total_bytes += total_bytes;
3733         tx_ring->total_packets += total_packets;
3734         tx_ring->tx_stats.bytes += total_bytes;
3735         tx_ring->tx_stats.packets += total_packets;
3736         adapter->net_stats.tx_bytes += total_bytes;
3737         adapter->net_stats.tx_packets += total_packets;
3738         return (count < tx_ring->count);
3739 }
3740
3741 /**
3742  * igb_receive_skb - helper function to handle rx indications
3743  * @ring: pointer to the receive ring receiving this packet
3744  * @status: descriptor status field as written by hardware
3745  * @rx_desc: rx descriptor; vlan field as written by hardware (no le/be conversion)
3746  * @skb: pointer to sk_buff to be indicated to stack
3747  **/
3748 static void igb_receive_skb(struct igb_ring *ring, u8 status,
3749                             union e1000_adv_rx_desc *rx_desc,
3750                             struct sk_buff *skb)
3751 {
3752         struct igb_adapter *adapter = ring->adapter;
3753         bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
3754
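        /* checksum-verified packets are handed to GRO (vlan_gro_receive()/
         * napi_gro_receive()) so consecutive TCP segments can be coalesced;
         * everything else takes the plain receive path */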
3755         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3756                 if (vlan_extracted)
3757                         vlan_gro_receive(&ring->napi, adapter->vlgrp,
3758                                          le16_to_cpu(rx_desc->wb.upper.vlan),
3759                                          skb);
3760                 else
3761                         napi_gro_receive(&ring->napi, skb);
3762         } else {
3763                 if (vlan_extracted)
3764                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3765                                           le16_to_cpu(rx_desc->wb.upper.vlan));
3766                 else
3767                         netif_receive_skb(skb);
3768         }
3769 }
3770
3772 static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
3773                                        u32 status_err, struct sk_buff *skb)
3774 {
3775         skb->ip_summed = CHECKSUM_NONE;
3776
3777         /* Ignore Checksum bit is set or checksum is disabled through ethtool */
3778         if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
3779                 return;
3780         /* TCP/UDP checksum error bit is set */
3781         if (status_err &
3782             (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
3783                 /* let the stack verify checksum errors */
3784                 adapter->hw_csum_err++;
3785                 return;
3786         }
3787         /* It must be a TCP or UDP packet with a valid checksum */
3788         if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
3789                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3790
3791         adapter->hw_csum_good++;
3792 }
3793
3794 static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3795                                  int *work_done, int budget)
3796 {
3797         struct igb_adapter *adapter = rx_ring->adapter;
3798         struct net_device *netdev = adapter->netdev;
3799         struct pci_dev *pdev = adapter->pdev;
3800         union e1000_adv_rx_desc *rx_desc, *next_rxd;
3801         struct igb_buffer *buffer_info, *next_buffer;
3802         struct sk_buff *skb;
3803         unsigned int i;
3804         u32 length, hlen, staterr;
3805         bool cleaned = false;
3806         int cleaned_count = 0;
3807         unsigned int total_bytes = 0, total_packets = 0;
3808
3809         i = rx_ring->next_to_clean;
3810         rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3811         staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3812
3813         while (staterr & E1000_RXD_STAT_DD) {
3814                 if (*work_done >= budget)
3815                         break;
3816                 (*work_done)++;
3817                 buffer_info = &rx_ring->buffer_info[i];
3818
3819                 /* HW will not DMA in data larger than the given buffer, even
3820                  * if it parses the (NFS, of course) header to be larger.  In
3821                  * that case, it fills the header buffer and spills the rest
3822                  * into the page.
3823                  */
3824                 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
3825                   E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
3826                 if (hlen > adapter->rx_ps_hdr_size)
3827                         hlen = adapter->rx_ps_hdr_size;
3828
3829                 length = le16_to_cpu(rx_desc->wb.upper.length);
3830                 cleaned = true;
3831                 cleaned_count++;
3832
3833                 skb = buffer_info->skb;
3834                 prefetch(skb->data - NET_IP_ALIGN);
3835                 buffer_info->skb = NULL;
3836                 if (!adapter->rx_ps_hdr_size) {
3837                         pci_unmap_single(pdev, buffer_info->dma,
3838                                          adapter->rx_buffer_len +
3839                                            NET_IP_ALIGN,
3840                                          PCI_DMA_FROMDEVICE);
3841                         skb_put(skb, length);
3842                         goto send_up;
3843                 }
3844
3845                 if (!skb_shinfo(skb)->nr_frags) {
3846                         pci_unmap_single(pdev, buffer_info->dma,
3847                                          adapter->rx_ps_hdr_size +
3848                                            NET_IP_ALIGN,
3849                                          PCI_DMA_FROMDEVICE);
3850                         skb_put(skb, hlen);
3851                 }
3852
3853                 if (length) {
3854                         pci_unmap_page(pdev, buffer_info->page_dma,
3855                                        PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
3856                         buffer_info->page_dma = 0;
3857
3858                         skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
3859                                                 buffer_info->page,
3860                                                 buffer_info->page_offset,
3861                                                 length);
3862
3863                         if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
3864                             (page_count(buffer_info->page) != 1))
3865                                 buffer_info->page = NULL;
3866                         else
3867                                 get_page(buffer_info->page);
3868
3869                         skb->len += length;
3870                         skb->data_len += length;
3871
3872                         skb->truesize += length;
3873                 }
3874 send_up:
3875                 i++;
3876                 if (i == rx_ring->count)
3877                         i = 0;
3878                 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3879                 prefetch(next_rxd);
3880                 next_buffer = &rx_ring->buffer_info[i];
3881
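                /* no EOP: the frame continues in the next descriptor, so
                 * hand the in-progress skb to the next slot and recycle
                 * that slot's pre-allocated skb into this one */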
3882                 if (!(staterr & E1000_RXD_STAT_EOP)) {
3883                         buffer_info->skb = next_buffer->skb;
3884                         buffer_info->dma = next_buffer->dma;
3885                         next_buffer->skb = skb;
3886                         next_buffer->dma = 0;
3887                         goto next_desc;
3888                 }
3889
3890                 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
3891                         dev_kfree_skb_irq(skb);
3892                         goto next_desc;
3893                 }
3894
3895                 total_bytes += skb->len;
3896                 total_packets++;
3897
3898                 igb_rx_checksum_adv(adapter, staterr, skb);
3899
3900                 skb->protocol = eth_type_trans(skb, netdev);
3901
3902                 igb_receive_skb(rx_ring, staterr, rx_desc, skb);
3903
3904 next_desc:
3905                 rx_desc->wb.upper.status_error = 0;
3906
3907                 /* return some buffers to hardware, one at a time is too slow */
3908                 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
3909                         igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3910                         cleaned_count = 0;
3911                 }
3912
3913                 /* use prefetched values */
3914                 rx_desc = next_rxd;
3915                 buffer_info = next_buffer;
3916
3917                 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3918         }
3919
3920         rx_ring->next_to_clean = i;
3921         cleaned_count = IGB_DESC_UNUSED(rx_ring);
3922
3923         if (cleaned_count)
3924                 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3925
3926         rx_ring->total_packets += total_packets;
3927         rx_ring->total_bytes += total_bytes;
3928         rx_ring->rx_stats.packets += total_packets;
3929         rx_ring->rx_stats.bytes += total_bytes;
3930         adapter->net_stats.rx_bytes += total_bytes;
3931         adapter->net_stats.rx_packets += total_packets;
3932         return cleaned;
3933 }
3934
3936 /**
3937  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
3938  * @rx_ring: address of the ring to refill
3939  **/
3940 static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
3941                                      int cleaned_count)
3942 {
3943         struct igb_adapter *adapter = rx_ring->adapter;
3944         struct net_device *netdev = adapter->netdev;
3945         struct pci_dev *pdev = adapter->pdev;
3946         union e1000_adv_rx_desc *rx_desc;
3947         struct igb_buffer *buffer_info;
3948         struct sk_buff *skb;
3949         unsigned int i;
3950
3951         i = rx_ring->next_to_use;
3952         buffer_info = &rx_ring->buffer_info[i];
3953
3954         while (cleaned_count--) {
3955                 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3956
3957                 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
3958                         if (!buffer_info->page) {
3959                                 buffer_info->page = alloc_page(GFP_ATOMIC);
3960                                 if (!buffer_info->page) {
3961                                         adapter->alloc_rx_buff_failed++;
3962                                         goto no_buffers;
3963                                 }
3964                                 buffer_info->page_offset = 0;
3965                         } else {
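                                /* the second PAGE_SIZE/2 half of an existing
                                 * page is still free, so flip the offset and
                                 * reuse it instead of allocating a new page */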
3966                                 buffer_info->page_offset ^= PAGE_SIZE / 2;
3967                         }
3968                         buffer_info->page_dma =
3969                                 pci_map_page(pdev,
3970                                              buffer_info->page,
3971                                              buffer_info->page_offset,
3972                                              PAGE_SIZE / 2,
3973                                              PCI_DMA_FROMDEVICE);
3974                 }
3975
3976                 if (!buffer_info->skb) {
3977                         int bufsz;
3978
3979                         if (adapter->rx_ps_hdr_size)
3980                                 bufsz = adapter->rx_ps_hdr_size;
3981                         else
3982                                 bufsz = adapter->rx_buffer_len;
3983                         bufsz += NET_IP_ALIGN;
3984                         skb = netdev_alloc_skb(netdev, bufsz);
3985
3986                         if (!skb) {
3987                                 adapter->alloc_rx_buff_failed++;
3988                                 goto no_buffers;
3989                         }
3990
3991                         /* Make buffer alignment 2 beyond a 16 byte boundary
3992                          * this will result in a 16 byte aligned IP header after
3993                          * the 14 byte MAC header is removed
3994                          */
3995                         skb_reserve(skb, NET_IP_ALIGN);
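                        /* 16-byte-aligned skb->data + 2 reserved bytes
                         * + 14-byte Ethernet header = 16-byte-aligned IP
                         * header once eth_type_trans() pulls the header */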
3996
3997                         buffer_info->skb = skb;
3998                         buffer_info->dma = pci_map_single(pdev, skb->data,
3999                                                           bufsz,
4000                                                           PCI_DMA_FROMDEVICE);
4001
4002                 }
4003                 /* Refresh the desc even if buffer_addrs didn't change because
4004                  * each write-back erases this info. */
4005                 if (adapter->rx_ps_hdr_size) {
4006                         rx_desc->read.pkt_addr =
4007                              cpu_to_le64(buffer_info->page_dma);
4008                         rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4009                 } else {
4010                         rx_desc->read.pkt_addr =
4011                              cpu_to_le64(buffer_info->dma);
4012                         rx_desc->read.hdr_addr = 0;
4013                 }
4014
4015                 i++;
4016                 if (i == rx_ring->count)
4017                         i = 0;
4018                 buffer_info = &rx_ring->buffer_info[i];
4019         }
4020
4021 no_buffers:
4022         if (rx_ring->next_to_use != i) {
4023                 rx_ring->next_to_use = i;
4024                 if (i == 0)
4025                         i = (rx_ring->count - 1);
4026                 else
4027                         i--;
4028
4029                 /* Force memory writes to complete before letting h/w
4030                  * know there are new descriptors to fetch.  (Only
4031                  * applicable for weak-ordered memory model archs,
4032                  * such as IA-64). */
4033                 wmb();
4034                 writel(i, adapter->hw.hw_addr + rx_ring->tail);
4035         }
4036 }
4037
4038 /**
4039  * igb_mii_ioctl - handle MII ioctls (copper PHYs only)
4040  * @netdev: network interface device structure
4041  * @ifr: pointer to user ifreq holding the mii_ioctl_data
4042  * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4043  **/
4044 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4045 {
4046         struct igb_adapter *adapter = netdev_priv(netdev);
4047         struct mii_ioctl_data *data = if_mii(ifr);
4048
4049         if (adapter->hw.phy.media_type != e1000_media_type_copper)
4050                 return -EOPNOTSUPP;
4051
4052         switch (cmd) {
4053         case SIOCGMIIPHY:
4054                 data->phy_id = adapter->hw.phy.addr;
4055                 break;
4056         case SIOCGMIIREG:
4057                 if (!capable(CAP_NET_ADMIN))
4058                         return -EPERM;
4059                 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4060                                      &data->val_out))
4061                         return -EIO;
4062                 break;
4063         case SIOCSMIIREG:
4064         default:
4065                 return -EOPNOTSUPP;
4066         }
4067         return 0;
4068 }
4069
4070 /**
4071  * igb_ioctl - dispatch device-specific ioctls
4072  * @netdev: network interface device structure
4073  * @ifr: pointer to user ifreq
4074  * @cmd: ioctl command; only the MII ioctls are handled
4075  **/
4076 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4077 {
4078         switch (cmd) {
4079         case SIOCGMIIPHY:
4080         case SIOCGMIIREG:
4081         case SIOCSMIIREG:
4082                 return igb_mii_ioctl(netdev, ifr, cmd);
4083         default:
4084                 return -EOPNOTSUPP;
4085         }
4086 }
4087
4088 static void igb_vlan_rx_register(struct net_device *netdev,
4089                                  struct vlan_group *grp)
4090 {
4091         struct igb_adapter *adapter = netdev_priv(netdev);
4092         struct e1000_hw *hw = &adapter->hw;
4093         u32 ctrl, rctl;
4094
4095         igb_irq_disable(adapter);
4096         adapter->vlgrp = grp;
4097
4098         if (grp) {
4099                 /* enable VLAN tag insert/strip */
4100                 ctrl = rd32(E1000_CTRL);
4101                 ctrl |= E1000_CTRL_VME;
4102                 wr32(E1000_CTRL, ctrl);
4103
4104                 /* enable VLAN receive filtering */
4105                 rctl = rd32(E1000_RCTL);
4106                 rctl &= ~E1000_RCTL_CFIEN;
4107                 wr32(E1000_RCTL, rctl);
4108                 igb_update_mng_vlan(adapter);
4109                 wr32(E1000_RLPML,
4110                                 adapter->max_frame_size + VLAN_TAG_SIZE);
4111         } else {
4112                 /* disable VLAN tag insert/strip */
4113                 ctrl = rd32(E1000_CTRL);
4114                 ctrl &= ~E1000_CTRL_VME;
4115                 wr32(E1000_CTRL, ctrl);
4116
4117                 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
4118                         igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4119                         adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
4120                 }
4121                 wr32(E1000_RLPML,
4122                                 adapter->max_frame_size);
4123         }
4124
4125         if (!test_bit(__IGB_DOWN, &adapter->state))
4126                 igb_irq_enable(adapter);
4127 }
4128
4129 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4130 {
4131         struct igb_adapter *adapter = netdev_priv(netdev);
4132         struct e1000_hw *hw = &adapter->hw;
4133         u32 vfta, index;
4134
4135         if ((adapter->hw.mng_cookie.status &
4136              E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4137             (vid == adapter->mng_vlan_id))
4138                 return;
4139         /* add VID to filter table */
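        /* the 4096 possible VIDs map onto 128 32-bit VFTA registers:
         * e.g. vid 100 lives in register 100 >> 5 = 3, bit 100 & 0x1F = 4 */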
4140         index = (vid >> 5) & 0x7F;
4141         vfta = array_rd32(E1000_VFTA, index);
4142         vfta |= (1 << (vid & 0x1F));
4143         igb_write_vfta(&adapter->hw, index, vfta);
4144 }
4145
4146 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4147 {
4148         struct igb_adapter *adapter = netdev_priv(netdev);
4149         struct e1000_hw *hw = &adapter->hw;
4150         u32 vfta, index;
4151
4152         igb_irq_disable(adapter);
4153         vlan_group_set_device(adapter->vlgrp, vid, NULL);
4154
4155         if (!test_bit(__IGB_DOWN, &adapter->state))
4156                 igb_irq_enable(adapter);
4157
4158         if ((adapter->hw.mng_cookie.status &
4159              E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4160             (vid == adapter->mng_vlan_id)) {
4161                 /* release control to f/w */
4162                 igb_release_hw_control(adapter);
4163                 return;
4164         }
4165
4166         /* remove VID from filter table */
4167         index = (vid >> 5) & 0x7F;
4168         vfta = array_rd32(E1000_VFTA, index);
4169         vfta &= ~(1 << (vid & 0x1F));
4170         igb_write_vfta(&adapter->hw, index, vfta);
4171 }
4172
4173 static void igb_restore_vlan(struct igb_adapter *adapter)
4174 {
4175         igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4176
4177         if (adapter->vlgrp) {
4178                 u16 vid;
4179                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4180                         if (!vlan_group_get_device(adapter->vlgrp, vid))
4181                                 continue;
4182                         igb_vlan_rx_add_vid(adapter->netdev, vid);
4183                 }
4184         }
4185 }
4186
4187 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
4188 {
4189         struct e1000_mac_info *mac = &adapter->hw.mac;
4190
4191         mac->autoneg = 0;
4192
4193         /* Fiber NICs only allow 1000 Mbps full duplex */
4194         if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
4195                 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4196                 dev_err(&adapter->pdev->dev,
4197                         "Unsupported Speed/Duplex configuration\n");
4198                 return -EINVAL;
4199         }
4200
4201         switch (spddplx) {
4202         case SPEED_10 + DUPLEX_HALF:
4203                 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4204                 break;
4205         case SPEED_10 + DUPLEX_FULL:
4206                 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4207                 break;
4208         case SPEED_100 + DUPLEX_HALF:
4209                 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4210                 break;
4211         case SPEED_100 + DUPLEX_FULL:
4212                 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4213                 break;
4214         case SPEED_1000 + DUPLEX_FULL:
4215                 mac->autoneg = 1;
4216                 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
4217                 break;
4218         case SPEED_1000 + DUPLEX_HALF: /* not supported */
4219         default:
4220                 dev_err(&adapter->pdev->dev,
4221                         "Unsupported Speed/Duplex configuration\n");
4222                 return -EINVAL;
4223         }
4224         return 0;
4225 }
4226
4227
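/**
 * igb_suspend - prepare the device for a system sleep state
 * @pdev: PCI device information struct
 * @state: sleep state being entered
 *
 * Closes the interface, tears down the queue/interrupt setup, saves
 * PCI state, and programs the Wake Up Filter Control register from
 * adapter->wol: link-change wake is dropped while link is already up,
 * and wake-on-multicast forces multicast-promiscuous receive so those
 * frames reach the wake-up filters.  D3hot/D3cold wake is armed only
 * when WoL or manageability is enabled; otherwise the fiber/serdes
 * link is powered down to save power.
 */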
4228 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
4229 {
4230         struct net_device *netdev = pci_get_drvdata(pdev);
4231         struct igb_adapter *adapter = netdev_priv(netdev);
4232         struct e1000_hw *hw = &adapter->hw;
4233         u32 ctrl, rctl, status;
4234         u32 wufc = adapter->wol;
4235 #ifdef CONFIG_PM
4236         int retval = 0;
4237 #endif
4238
4239         netif_device_detach(netdev);
4240
4241         if (netif_running(netdev))
4242                 igb_close(netdev);
4243
4244         igb_reset_interrupt_capability(adapter);
4245
4246         igb_free_queues(adapter);
4247
4248 #ifdef CONFIG_PM
4249         retval = pci_save_state(pdev);
4250         if (retval)
4251                 return retval;
4252 #endif
4253
4254         status = rd32(E1000_STATUS);
4255         if (status & E1000_STATUS_LU)
4256                 wufc &= ~E1000_WUFC_LNKC;
4257
4258         if (wufc) {
4259                 igb_setup_rctl(adapter);
4260                 igb_set_multi(netdev);
4261
4262                 /* turn on all-multi mode if wake on multicast is enabled */
4263                 if (wufc & E1000_WUFC_MC) {
4264                         rctl = rd32(E1000_RCTL);
4265                         rctl |= E1000_RCTL_MPE;
4266                         wr32(E1000_RCTL, rctl);
4267                 }
4268
4269                 ctrl = rd32(E1000_CTRL);
4270                 /* advertise wake from D3Cold */
4271                 #define E1000_CTRL_ADVD3WUC 0x00100000
4272                 /* phy power management enable */
4273                 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4274                 ctrl |= E1000_CTRL_ADVD3WUC;
4275                 wr32(E1000_CTRL, ctrl);
4276
4277                 /* Allow time for pending master requests to run */
4278                 igb_disable_pcie_master(&adapter->hw);
4279
4280                 wr32(E1000_WUC, E1000_WUC_PME_EN);
4281                 wr32(E1000_WUFC, wufc);
4282         } else {
4283                 wr32(E1000_WUC, 0);
4284                 wr32(E1000_WUFC, 0);
4285         }
4286
4287         /* make sure adapter isn't asleep if manageability/wol is enabled */
4288         if (wufc || adapter->en_mng_pt) {
4289                 pci_enable_wake(pdev, PCI_D3hot, 1);
4290                 pci_enable_wake(pdev, PCI_D3cold, 1);
4291         } else {
4292                 igb_shutdown_fiber_serdes_link_82575(hw);
4293                 pci_enable_wake(pdev, PCI_D3hot, 0);
4294                 pci_enable_wake(pdev, PCI_D3cold, 0);
4295         }
4296
4297         /* Release control of h/w to f/w.  If f/w is AMT enabled, this
4298          * would have already happened in close and is redundant. */
4299         igb_release_hw_control(adapter);
4300
4301         pci_disable_device(pdev);
4302
4303         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4304
4305         return 0;
4306 }
4307
4308 #ifdef CONFIG_PM
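/**
 * igb_resume - restore the device after a system sleep state
 * @pdev: PCI device information struct
 *
 * Mirror image of igb_suspend(): restores PCI state, re-enables the
 * device and bus mastering, disarms wake, rebuilds the interrupt and
 * queue setup, resets the hardware, clears any latched wake-up status
 * in WUS, and reopens the interface if it was running at suspend time.
 */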
4309 static int igb_resume(struct pci_dev *pdev)
4310 {
4311         struct net_device *netdev = pci_get_drvdata(pdev);
4312         struct igb_adapter *adapter = netdev_priv(netdev);
4313         struct e1000_hw *hw = &adapter->hw;
4314         int err;
4315
4316         pci_set_power_state(pdev, PCI_D0);
4317         pci_restore_state(pdev);
4318
4319         if (adapter->need_ioport)
4320                 err = pci_enable_device(pdev);
4321         else
4322                 err = pci_enable_device_mem(pdev);
4323         if (err) {
4324                 dev_err(&pdev->dev,
4325                         "Cannot enable PCI device from suspend\n");
4326                 return err;
4327         }
4328         pci_set_master(pdev);
4329
4330         pci_enable_wake(pdev, PCI_D3hot, 0);
4331         pci_enable_wake(pdev, PCI_D3cold, 0);
4332
4333         igb_set_interrupt_capability(adapter);
4334
4335         if (igb_alloc_queues(adapter)) {
4336                 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4337                 return -ENOMEM;
4338         }
4339
4340         /* e1000_power_up_phy(adapter); */
4341
4342         igb_reset(adapter);
4343         wr32(E1000_WUS, ~0);
4344
4345         if (netif_running(netdev)) {
4346                 err = igb_open(netdev);
4347                 if (err)
4348                         return err;
4349         }
4350
4351         netif_device_attach(netdev);
4352
4353         /* let the f/w know that the h/w is now under the control of the
4354          * driver. */
4355         igb_get_hw_control(adapter);
4356
4357         return 0;
4358 }
4359 #endif
4360
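/**
 * igb_shutdown - called at system shutdown or reboot
 * @pdev: PCI device information struct
 *
 * Reuses the suspend path so that wake-on-LAN, if configured, remains
 * armed while the system is powered off.
 */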
4361 static void igb_shutdown(struct pci_dev *pdev)
4362 {
4363         igb_suspend(pdev, PMSG_SUSPEND);
4364 }
4365
4366 #ifdef CONFIG_NET_POLL_CONTROLLER
4367 /*
4368  * Polling 'interrupt' - used by things like netconsole to send skbs
4369  * without having to re-enable interrupts. It's not called while
4370  * the interrupt routine is executing.
4371  */
4372 static void igb_netpoll(struct net_device *netdev)
4373 {
4374         struct igb_adapter *adapter = netdev_priv(netdev);
4375         int i;
4376         int work_done = 0;
4377
4378         igb_irq_disable(adapter);
4379         adapter->flags |= IGB_FLAG_IN_NETPOLL;
4380
4381         for (i = 0; i < adapter->num_tx_queues; i++)
4382                 igb_clean_tx_irq(&adapter->tx_ring[i]);
4383
4384         for (i = 0; i < adapter->num_rx_queues; i++)
4385                 igb_clean_rx_irq_adv(&adapter->rx_ring[i],
4386                                      &work_done,
4387                                      adapter->rx_ring[i].napi.weight);
4388
4389         adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
4390         igb_irq_enable(adapter);
4391 }
4392 #endif /* CONFIG_NET_POLL_CONTROLLER */
4393
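/*
 * PCI Express error recovery is a three-step handshake with the AER
 * core, implemented by the callbacks below: igb_io_error_detected()
 * quiesces the device and requests a slot reset, igb_io_slot_reset()
 * re-enables and re-initializes the hardware, and igb_io_resume()
 * restarts traffic once the core reports that recovery is complete.
 */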
4394 /**
4395  * igb_io_error_detected - called when PCI error is detected
4396  * @pdev: Pointer to PCI device
4397  * @state: The current pci connection state
4398  *
4399  * This function is called after a PCI bus error affecting
4400  * this device has been detected.
4401  */
4402 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
4403                                               pci_channel_state_t state)
4404 {
4405         struct net_device *netdev = pci_get_drvdata(pdev);
4406         struct igb_adapter *adapter = netdev_priv(netdev);
4407
4408         netif_device_detach(netdev);
4409
4410         if (netif_running(netdev))
4411                 igb_down(adapter);
4412         pci_disable_device(pdev);
4413
4414         /* Request a slot reset. */
4415         return PCI_ERS_RESULT_NEED_RESET;
4416 }
4417
4418 /**
4419  * igb_io_slot_reset - called after the pci bus has been reset.
4420  * @pdev: Pointer to PCI device
4421  *
4422  * Restart the card from scratch, as if from a cold-boot. Implementation
4423  * resembles the first-half of the igb_resume routine.
4424  */
4425 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4426 {
4427         struct net_device *netdev = pci_get_drvdata(pdev);
4428         struct igb_adapter *adapter = netdev_priv(netdev);
4429         struct e1000_hw *hw = &adapter->hw;
4430         pci_ers_result_t result;
4431         int err;
4432
4433         if (adapter->need_ioport)
4434                 err = pci_enable_device(pdev);
4435         else
4436                 err = pci_enable_device_mem(pdev);
4437
4438         if (err) {
4439                 dev_err(&pdev->dev,
4440                         "Cannot re-enable PCI device after reset.\n");
4441                 result = PCI_ERS_RESULT_DISCONNECT;
4442         } else {
4443                 pci_set_master(pdev);
4444                 pci_restore_state(pdev);
4445
4446                 pci_enable_wake(pdev, PCI_D3hot, 0);
4447                 pci_enable_wake(pdev, PCI_D3cold, 0);
4448
4449                 igb_reset(adapter);
4450                 wr32(E1000_WUS, ~0);
4451                 result = PCI_ERS_RESULT_RECOVERED;
4452         }
4453
4454         err = pci_cleanup_aer_uncorrect_error_status(pdev);
4455         if (err) {
4456                 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
4457                         "failed 0x%x\n", err);
4458                 /* non-fatal, continue */
4459         }
4460
4461         return result;
4462 }
4463
4464 /**
4465  * igb_io_resume - called when traffic can start flowing again.
4466  * @pdev: Pointer to PCI device
4467  *
4468  * This callback is called when the error recovery driver tells us that
4469  * it's OK to resume normal operation. Implementation resembles the
4470  * second-half of the igb_resume routine.
4471  */
4472 static void igb_io_resume(struct pci_dev *pdev)
4473 {
4474         struct net_device *netdev = pci_get_drvdata(pdev);
4475         struct igb_adapter *adapter = netdev_priv(netdev);
4476
4477         if (netif_running(netdev)) {
4478                 if (igb_up(adapter)) {
4479                         dev_err(&pdev->dev, "igb_up failed after reset\n");
4480                         return;
4481                 }
4482         }
4483
4484         netif_device_attach(netdev);
4485
4486         /* let the f/w know that the h/w is now under the control of the
4487          * driver. */
4488         igb_get_hw_control(adapter);
4489 }
4490
4491 /* igb_main.c */