/* drivers/net/s2io.c -- snapshot taken from linux-2.6-omap-h63xx.git,
 * commit 7c3551bad14cc333fd907985e6af8b5f9fd0420c.
 */
/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik          : For pointing out the improper error condition
 *                        check in the s2io_xmit routine and also some
 *                        issues in the Tx watch dog function. Also for
 *                        patiently answering all those innumerable
 *                        questions regarding the 2.6 porting issues.
 * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
 *                        macros available only in 2.6 Kernel.
 * Francois Romieu      : For pointing out all code parts that were
 *                        deprecated and also styling related comments.
 * Grant Grundler       : For helping me get rid of some Architecture
 *                        dependent code.
 * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 * in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *              values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 * Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     1(MSI), 2(MSI_X). Default value is '0(INTA)'
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values '1' for enable '0' for disable. Default is '0'
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet
 * napi: This parameter is used to enable/disable NAPI (polling Rx)
 *     Possible values '1' for enable and '0' for disable. Default is '1'
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload(UFO)
 *      Possible values '1' for enable and '0' for disable. Default is '0'
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *                 Possible values '1' for enable , '0' for disable.
 *                 Default is '2' - which means disable in promisc mode
 *                 and enable in non-promiscuous mode.
 ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
/* Driver version string, exported via ethtool and module metadata. */
#define DRV_VERSION "2.0.16.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-rxd-mode lookup tables, indexed by sp->rxd_mode:
 * rxd_size  - size in bytes of one Rx descriptor,
 * rxd_count - usable descriptors per Rx block (block sizing elsewhere uses
 *             rxd_count + 1; presumably the extra slot is the block-link
 *             descriptor -- confirm against SIZE_OF_BLOCK).
 */
static int rxd_size[4] = {32,48,48,64};
static int rxd_count[4] = {127,85,85,63};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
106 /*
107  * Cards with following subsystem_id have a link state indication
108  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109  * macro below identifies these cards given the subsystem_id.
110  */
/* Fully parenthesized (arguments and whole expansion) so the macro expands
 * safely when passed compound expressions or used inside larger expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ?			\
		(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
		  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up when neither the remote-fault nor the local-fault bit is set
 * in the adapter status.  'val64' is parenthesized so expressions such as
 * LINK_IS_UP(a | b) bind correctly against the '&'.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet; relies on an 'sp' variable at the call site. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
#define PANIC	1
#define LOW	2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
133 /* Ethtool related variables and Macros. */
/* Names reported to ethtool for the self-test entries, in the order the
 * tests are run ("offline"/"online" indicates when each test may execute).
 */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
141
142 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
143         {"tmac_frms"},
144         {"tmac_data_octets"},
145         {"tmac_drop_frms"},
146         {"tmac_mcst_frms"},
147         {"tmac_bcst_frms"},
148         {"tmac_pause_ctrl_frms"},
149         {"tmac_ttl_octets"},
150         {"tmac_ucst_frms"},
151         {"tmac_nucst_frms"},
152         {"tmac_any_err_frms"},
153         {"tmac_ttl_less_fb_octets"},
154         {"tmac_vld_ip_octets"},
155         {"tmac_vld_ip"},
156         {"tmac_drop_ip"},
157         {"tmac_icmp"},
158         {"tmac_rst_tcp"},
159         {"tmac_tcp"},
160         {"tmac_udp"},
161         {"rmac_vld_frms"},
162         {"rmac_data_octets"},
163         {"rmac_fcs_err_frms"},
164         {"rmac_drop_frms"},
165         {"rmac_vld_mcst_frms"},
166         {"rmac_vld_bcst_frms"},
167         {"rmac_in_rng_len_err_frms"},
168         {"rmac_out_rng_len_err_frms"},
169         {"rmac_long_frms"},
170         {"rmac_pause_ctrl_frms"},
171         {"rmac_unsup_ctrl_frms"},
172         {"rmac_ttl_octets"},
173         {"rmac_accepted_ucst_frms"},
174         {"rmac_accepted_nucst_frms"},
175         {"rmac_discarded_frms"},
176         {"rmac_drop_events"},
177         {"rmac_ttl_less_fb_octets"},
178         {"rmac_ttl_frms"},
179         {"rmac_usized_frms"},
180         {"rmac_osized_frms"},
181         {"rmac_frag_frms"},
182         {"rmac_jabber_frms"},
183         {"rmac_ttl_64_frms"},
184         {"rmac_ttl_65_127_frms"},
185         {"rmac_ttl_128_255_frms"},
186         {"rmac_ttl_256_511_frms"},
187         {"rmac_ttl_512_1023_frms"},
188         {"rmac_ttl_1024_1518_frms"},
189         {"rmac_ip"},
190         {"rmac_ip_octets"},
191         {"rmac_hdr_err_ip"},
192         {"rmac_drop_ip"},
193         {"rmac_icmp"},
194         {"rmac_tcp"},
195         {"rmac_udp"},
196         {"rmac_err_drp_udp"},
197         {"rmac_xgmii_err_sym"},
198         {"rmac_frms_q0"},
199         {"rmac_frms_q1"},
200         {"rmac_frms_q2"},
201         {"rmac_frms_q3"},
202         {"rmac_frms_q4"},
203         {"rmac_frms_q5"},
204         {"rmac_frms_q6"},
205         {"rmac_frms_q7"},
206         {"rmac_full_q0"},
207         {"rmac_full_q1"},
208         {"rmac_full_q2"},
209         {"rmac_full_q3"},
210         {"rmac_full_q4"},
211         {"rmac_full_q5"},
212         {"rmac_full_q6"},
213         {"rmac_full_q7"},
214         {"rmac_pause_cnt"},
215         {"rmac_xgmii_data_err_cnt"},
216         {"rmac_xgmii_ctrl_err_cnt"},
217         {"rmac_accepted_ip"},
218         {"rmac_err_tcp"},
219         {"rd_req_cnt"},
220         {"new_rd_req_cnt"},
221         {"new_rd_req_rtry_cnt"},
222         {"rd_rtry_cnt"},
223         {"wr_rtry_rd_ack_cnt"},
224         {"wr_req_cnt"},
225         {"new_wr_req_cnt"},
226         {"new_wr_req_rtry_cnt"},
227         {"wr_rtry_cnt"},
228         {"wr_disc_cnt"},
229         {"rd_rtry_wr_ack_cnt"},
230         {"txp_wr_cnt"},
231         {"txd_rd_cnt"},
232         {"txd_wr_cnt"},
233         {"rxd_rd_cnt"},
234         {"rxd_wr_cnt"},
235         {"txf_rd_cnt"},
236         {"rxf_wr_cnt"},
237         {"rmac_ttl_1519_4095_frms"},
238         {"rmac_ttl_4096_8191_frms"},
239         {"rmac_ttl_8192_max_frms"},
240         {"rmac_ttl_gt_max_frms"},
241         {"rmac_osized_alt_frms"},
242         {"rmac_jabber_alt_frms"},
243         {"rmac_gt_max_alt_frms"},
244         {"rmac_vlan_frms"},
245         {"rmac_len_discard"},
246         {"rmac_fcs_discard"},
247         {"rmac_pf_discard"},
248         {"rmac_da_discard"},
249         {"rmac_red_discard"},
250         {"rmac_rts_discard"},
251         {"rmac_ingm_full_discard"},
252         {"link_fault_cnt"},
253         {"\n DRIVER STATISTICS"},
254         {"single_bit_ecc_errs"},
255         {"double_bit_ecc_errs"},
256         {"parity_err_cnt"},
257         {"serious_err_cnt"},
258         {"soft_reset_cnt"},
259         {"fifo_full_cnt"},
260         {"ring_full_cnt"},
261         ("alarm_transceiver_temp_high"),
262         ("alarm_transceiver_temp_low"),
263         ("alarm_laser_bias_current_high"),
264         ("alarm_laser_bias_current_low"),
265         ("alarm_laser_output_power_high"),
266         ("alarm_laser_output_power_low"),
267         ("warn_transceiver_temp_high"),
268         ("warn_transceiver_temp_low"),
269         ("warn_laser_bias_current_high"),
270         ("warn_laser_bias_current_low"),
271         ("warn_laser_output_power_high"),
272         ("warn_laser_output_power_low"),
273         ("lro_aggregated_pkts"),
274         ("lro_flush_both_count"),
275         ("lro_out_of_sequence_pkts"),
276         ("lro_flush_due_to_max_pkts"),
277         ("lro_avg_aggr_pkts"),
278 };
279
/* Entry counts / string-block sizes for the ethtool tables above.  Each
 * macro is wrapped in parentheses so it stays a single term inside larger
 * expressions (e.g. division or comparison at a call site).
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

/* Initialise and arm 'timer' with the given handler, argument and expiry.
 * Wrapped in do { } while (0) so the multi-statement expansion behaves as
 * one statement inside unbraced if/else bodies.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long) arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)
292 /* Add the vlan */
293 static void s2io_vlan_rx_register(struct net_device *dev,
294                                         struct vlan_group *grp)
295 {
296         struct s2io_nic *nic = dev->priv;
297         unsigned long flags;
298
299         spin_lock_irqsave(&nic->tx_lock, flags);
300         nic->vlgrp = grp;
301         spin_unlock_irqrestore(&nic->tx_lock, flags);
302 }
303
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
/* NOTE(review): this global has external linkage; confirm whether any other
 * translation unit references it before making it static.
 */
int vlan_strip_flag;
306
307 /* Unregister the vlan */
308 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
309 {
310         struct s2io_nic *nic = dev->priv;
311         unsigned long flags;
312
313         spin_lock_irqsave(&nic->tx_lock, flags);
314         if (nic->vlgrp)
315                 nic->vlgrp->vlan_devices[vid] = NULL;
316         spin_unlock_irqrestore(&nic->tx_lock, flags);
317 }
318
319 /*
320  * Constants to be programmed into the Xena's registers, to configure
321  * the XAUI.
322  */
323
#define END_SIGN	0x0	/* terminator for the DTX write sequences below */

/* Xframe II (Herc) XAUI DTX configuration: pairs of set-address / write-data
 * values replayed into the adapter, terminated by END_SIGN.
 */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
345
/* Xframe I (Xena) XAUI DTX configuration: pairs of set-address / write-data
 * values replayed into the adapter, terminated by END_SIGN.
 */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
361
/*
 * Constants for fixing the MacAddress problem seen mostly on
 * Alpha machines: a fixed sequence of register-write values replayed
 * verbatim, terminated by END_SIGN.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
383
384 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
385 MODULE_LICENSE("GPL");
386 MODULE_VERSION(DRV_VERSION);
387
388
389 /* Module Loadable parameters. */
/* Number of Tx FIFOs and Rx rings actually used (upper bounds come from
 * MAX_TX_FIFOS / MAX_RX_RINGS in s2io.h).
 */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


/* Rx descriptor/buffer mode for all rings; valid values are 1, 2 and 3. */
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx queue depths: FIFO 0 gets a larger default than FIFOs 1-7. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts, and per-ring max frame length (0 = default). */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

/* Array parameters; permission 0 keeps them out of sysfs (load-time only). */
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
430
431 /*
432  * S2IO device table.
433  * This table lists all the devices that this driver supports.
434  */
435 static struct pci_device_id s2io_tbl[] __devinitdata = {
436         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
437          PCI_ANY_ID, PCI_ANY_ID},
438         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
439          PCI_ANY_ID, PCI_ANY_ID},
440         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
441          PCI_ANY_ID, PCI_ANY_ID},
442         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
443          PCI_ANY_ID, PCI_ANY_ID},
444         {0,}
445 };
446
447 MODULE_DEVICE_TABLE(pci, s2io_tbl);
448
/* PCI driver glue: probe/remove entry points for all devices in s2io_tbl.
 * NOTE(review): no suspend/resume handlers are wired up here -- confirm
 * power management is intentionally unsupported.
 */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
};
455
456 /* A simplifier macro used both by init and free shared_mem Fns(). */
/* Pages needed to hold 'len' TxD lists at 'per_each' lists per page
 * (ceiling division).  Arguments are parenthesized so compound expressions
 * such as TXD_MEM_PAGE_CNT(a + b, c) expand correctly.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
458
459 /**
460  * init_shared_mem - Allocation and Initialization of Memory
461  * @nic: Device private variable.
462  * Description: The function allocates all the memory areas shared
463  * between the NIC and the driver. This includes Tx descriptors,
464  * Rx descriptors and the statistics block.
465  */
466
static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* On any allocation failure below this function returns early; the
	 * caller is expected to invoke free_shared_mem(), which releases
	 * everything allocated up to the point of failure.
	 */

	/* Allocation and initialization of TXDLs in FIOFs */
	/* Validate the total TxD request summed over all FIFOs. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	/* How many TxD lists fit in one page. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping: one list_info_hold per descriptor slot. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(ERR_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	/* Initialise FIFO state and back each FIFO with DMA-coherent pages
	 * that hold the actual TxD lists.
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			/* NOTE(review): a second zero-DMA occurrence would
			 * overwrite zerodma_virt_addr and lose the first
			 * page -- confirm whether that can happen.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
				"%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
				"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			/* Hand out per-descriptor slices of this page until
			 * the page or the FIFO is exhausted.
			 */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Scratch area used by the UFO path; 'size' still holds the total
	 * TxD count computed above.
	 */
	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		/* num_rxd must be a whole number of blocks; each block is
		 * rxd_count[mode] usable RxDs plus one link slot.
		 */
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
		/*  Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; //size is always page size
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			/* Per-RxD table of virtual/DMA addresses inside the
			 * block, spaced rxd_size[mode] bytes apart.
			 */
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Each block's link descriptor points at the next block's
		 * virtual and physical addresses, wrapping at the end so
		 * the blocks form a ring.
		 */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			   (rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
				     GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					/* ba_0_org/ba_1_org keep the raw
					 * pointers for kfree(); ba_0/ba_1
					 * are the ALIGN_SIZE-aligned views
					 * actually used for the buffers.
					 */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr at this point is the DMA address of the
	 * last Rx block allocated above, not of the stats block; the log
	 * label may be misleading -- confirm intent.
	 */
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}
737
738 /**
739  * free_shared_mem - Free the allocated Memory
740  * @nic:  Device private variable.
741  * Description: This function is to free all memory locations allocated by
742  * the init_shared_mem() function and return it to the kernel.
743  */
744
745 static void free_shared_mem(struct s2io_nic *nic)
746 {
747         int i, j, blk_cnt, size;
748         void *tmp_v_addr;
749         dma_addr_t tmp_p_addr;
750         struct mac_info *mac_control;
751         struct config_param *config;
752         int lst_size, lst_per_page;
753         struct net_device *dev = nic->dev;
754
755         if (!nic)
756                 return;
757
758         mac_control = &nic->mac_control;
759         config = &nic->config;
760
761         lst_size = (sizeof(struct TxD) * config->max_txds);
762         lst_per_page = PAGE_SIZE / lst_size;
763
764         for (i = 0; i < config->tx_fifo_num; i++) {
765                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
766                                                 lst_per_page);
767                 for (j = 0; j < page_num; j++) {
768                         int mem_blks = (j * lst_per_page);
769                         if (!mac_control->fifos[i].list_info)
770                                 return;
771                         if (!mac_control->fifos[i].list_info[mem_blks].
772                                  list_virt_addr)
773                                 break;
774                         pci_free_consistent(nic->pdev, PAGE_SIZE,
775                                             mac_control->fifos[i].
776                                             list_info[mem_blks].
777                                             list_virt_addr,
778                                             mac_control->fifos[i].
779                                             list_info[mem_blks].
780                                             list_phy_addr);
781                 }
782                 /* If we got a zero DMA address during allocation,
783                  * free the page now
784                  */
785                 if (mac_control->zerodma_virt_addr) {
786                         pci_free_consistent(nic->pdev, PAGE_SIZE,
787                                             mac_control->zerodma_virt_addr,
788                                             (dma_addr_t)0);
789                         DBG_PRINT(INIT_DBG,
790                                 "%s: Freeing TxDL with zero DMA addr. ",
791                                 dev->name);
792                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
793                                 mac_control->zerodma_virt_addr);
794                 }
795                 kfree(mac_control->fifos[i].list_info);
796         }
797
798         size = SIZE_OF_BLOCK;
799         for (i = 0; i < config->rx_ring_num; i++) {
800                 blk_cnt = mac_control->rings[i].block_count;
801                 for (j = 0; j < blk_cnt; j++) {
802                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
803                                 block_virt_addr;
804                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
805                                 block_dma_addr;
806                         if (tmp_v_addr == NULL)
807                                 break;
808                         pci_free_consistent(nic->pdev, size,
809                                             tmp_v_addr, tmp_p_addr);
810                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
811                 }
812         }
813
814         if (nic->rxd_mode >= RXD_MODE_3A) {
815                 /* Freeing buffer storage addresses in 2BUFF mode. */
816                 for (i = 0; i < config->rx_ring_num; i++) {
817                         blk_cnt = config->rx_cfg[i].num_rxd /
818                             (rxd_count[nic->rxd_mode] + 1);
819                         for (j = 0; j < blk_cnt; j++) {
820                                 int k = 0;
821                                 if (!mac_control->rings[i].ba[j])
822                                         continue;
823                                 while (k != rxd_count[nic->rxd_mode]) {
824                                         struct buffAdd *ba =
825                                                 &mac_control->rings[i].ba[j][k];
826                                         kfree(ba->ba_0_org);
827                                         kfree(ba->ba_1_org);
828                                         k++;
829                                 }
830                                 kfree(mac_control->rings[i].ba[j]);
831                         }
832                         kfree(mac_control->rings[i].ba);
833                 }
834         }
835
836         if (mac_control->stats_mem) {
837                 pci_free_consistent(nic->pdev,
838                                     mac_control->stats_mem_sz,
839                                     mac_control->stats_mem,
840                                     mac_control->stats_mem_phy);
841         }
842         if (nic->ufo_in_band_v)
843                 kfree(nic->ufo_in_band_v);
844 }
845
846 /**
847  * s2io_verify_pci_mode -
848  */
849
850 static int s2io_verify_pci_mode(struct s2io_nic *nic)
851 {
852         struct XENA_dev_config __iomem *bar0 = nic->bar0;
853         register u64 val64 = 0;
854         int     mode;
855
856         val64 = readq(&bar0->pci_mode);
857         mode = (u8)GET_PCI_MODE(val64);
858
859         if ( val64 & PCI_MODE_UNKNOWN_MODE)
860                 return -1;      /* Unknown PCI mode */
861         return mode;
862 }
863
864 #define NEC_VENID   0x1033
865 #define NEC_DEVID   0x0125
866 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
867 {
868         struct pci_dev *tdev = NULL;
869         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
870                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
871                         if (tdev->bus == s2io_pdev->bus->parent)
872                                 pci_dev_put(tdev);
873                                 return 1;
874                 }
875         }
876         return 0;
877 }
878
/* Effective bus speed (MHz) indexed by the GET_PCI_MODE() value.
 * NOTE(review): the M2 entries appear to hold the doubled effective
 * clock (e.g. M2_66 -> 133) to match the messages below — confirm
 * against the PCI_MODE_* definitions in the header.
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - Decode and log the adapter's PCI bus mode.
 * @nic: Device private variable.
 * Description: Reads the pci_mode register, caches the corresponding
 * bus speed in nic->config.bus_speed, and prints the bus width and
 * clock (or a PCI-E notice when behind a NEC bridge).
 * Return: the decoded mode on success, -1 on unknown/unsupported mode.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int	mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	/* Cache the speed for later interrupt-rate calculations. */
	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
							nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	/* The M2 messages report the doubled effective clock. */
	switch(mode) {
		case PCI_MODE_PCI_33:
			DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
			break;
		case PCI_MODE_PCI_66:
			DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
			break;
		case PCI_MODE_PCIX_M1_66:
			DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M1_100:
			DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M1_133:
			DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M2_66:
			DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
			break;
		case PCI_MODE_PCIX_M2_100:
			DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
			break;
		case PCI_MODE_PCIX_M2_133:
			DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
			break;
		default:
			return -1;	/* Unsupported bus speed */
	}

	return mode;
}
941
942 /**
943  *  init_nic - Initialization of hardware
944  *  @nic: device peivate variable
945  *  Description: The function sequentially configures every block
946  *  of the H/W from their reset values.
947  *  Return Value:  SUCCESS on success and
948  *  '-1' on failure (endian settings incorrect).
949  */
950
951 static int init_nic(struct s2io_nic *nic)
952 {
953         struct XENA_dev_config __iomem *bar0 = nic->bar0;
954         struct net_device *dev = nic->dev;
955         register u64 val64 = 0;
956         void __iomem *add;
957         u32 time;
958         int i, j;
959         struct mac_info *mac_control;
960         struct config_param *config;
961         int dtx_cnt = 0;
962         unsigned long long mem_share;
963         int mem_size;
964
965         mac_control = &nic->mac_control;
966         config = &nic->config;
967
968         /* to set the swapper controle on the card */
969         if(s2io_set_swapper(nic)) {
970                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
971                 return -1;
972         }
973
974         /*
975          * Herc requires EOI to be removed from reset before XGXS, so..
976          */
977         if (nic->device_type & XFRAME_II_DEVICE) {
978                 val64 = 0xA500000000ULL;
979                 writeq(val64, &bar0->sw_reset);
980                 msleep(500);
981                 val64 = readq(&bar0->sw_reset);
982         }
983
984         /* Remove XGXS from reset state */
985         val64 = 0;
986         writeq(val64, &bar0->sw_reset);
987         msleep(500);
988         val64 = readq(&bar0->sw_reset);
989
990         /*  Enable Receiving broadcasts */
991         add = &bar0->mac_cfg;
992         val64 = readq(&bar0->mac_cfg);
993         val64 |= MAC_RMAC_BCAST_ENABLE;
994         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
995         writel((u32) val64, add);
996         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
997         writel((u32) (val64 >> 32), (add + 4));
998
999         /* Read registers in all blocks */
1000         val64 = readq(&bar0->mac_int_mask);
1001         val64 = readq(&bar0->mc_int_mask);
1002         val64 = readq(&bar0->xgxs_int_mask);
1003
1004         /*  Set MTU */
1005         val64 = dev->mtu;
1006         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1007
1008         if (nic->device_type & XFRAME_II_DEVICE) {
1009                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1010                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1011                                           &bar0->dtx_control, UF);
1012                         if (dtx_cnt & 0x1)
1013                                 msleep(1); /* Necessary!! */
1014                         dtx_cnt++;
1015                 }
1016         } else {
1017                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1018                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1019                                           &bar0->dtx_control, UF);
1020                         val64 = readq(&bar0->dtx_control);
1021                         dtx_cnt++;
1022                 }
1023         }
1024
1025         /*  Tx DMA Initialization */
1026         val64 = 0;
1027         writeq(val64, &bar0->tx_fifo_partition_0);
1028         writeq(val64, &bar0->tx_fifo_partition_1);
1029         writeq(val64, &bar0->tx_fifo_partition_2);
1030         writeq(val64, &bar0->tx_fifo_partition_3);
1031
1032
1033         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1034                 val64 |=
1035                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1036                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1037                                     ((i * 32) + 5), 3);
1038
1039                 if (i == (config->tx_fifo_num - 1)) {
1040                         if (i % 2 == 0)
1041                                 i++;
1042                 }
1043
1044                 switch (i) {
1045                 case 1:
1046                         writeq(val64, &bar0->tx_fifo_partition_0);
1047                         val64 = 0;
1048                         break;
1049                 case 3:
1050                         writeq(val64, &bar0->tx_fifo_partition_1);
1051                         val64 = 0;
1052                         break;
1053                 case 5:
1054                         writeq(val64, &bar0->tx_fifo_partition_2);
1055                         val64 = 0;
1056                         break;
1057                 case 7:
1058                         writeq(val64, &bar0->tx_fifo_partition_3);
1059                         break;
1060                 }
1061         }
1062
1063         /*
1064          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1065          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1066          */
1067         if ((nic->device_type == XFRAME_I_DEVICE) &&
1068                 (get_xena_rev_id(nic->pdev) < 4))
1069                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1070
1071         val64 = readq(&bar0->tx_fifo_partition_0);
1072         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1073                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1074
1075         /*
1076          * Initialization of Tx_PA_CONFIG register to ignore packet
1077          * integrity checking.
1078          */
1079         val64 = readq(&bar0->tx_pa_cfg);
1080         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1081             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1082         writeq(val64, &bar0->tx_pa_cfg);
1083
1084         /* Rx DMA intialization. */
1085         val64 = 0;
1086         for (i = 0; i < config->rx_ring_num; i++) {
1087                 val64 |=
1088                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1089                          3);
1090         }
1091         writeq(val64, &bar0->rx_queue_priority);
1092
1093         /*
1094          * Allocating equal share of memory to all the
1095          * configured Rings.
1096          */
1097         val64 = 0;
1098         if (nic->device_type & XFRAME_II_DEVICE)
1099                 mem_size = 32;
1100         else
1101                 mem_size = 64;
1102
1103         for (i = 0; i < config->rx_ring_num; i++) {
1104                 switch (i) {
1105                 case 0:
1106                         mem_share = (mem_size / config->rx_ring_num +
1107                                      mem_size % config->rx_ring_num);
1108                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1109                         continue;
1110                 case 1:
1111                         mem_share = (mem_size / config->rx_ring_num);
1112                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1113                         continue;
1114                 case 2:
1115                         mem_share = (mem_size / config->rx_ring_num);
1116                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1117                         continue;
1118                 case 3:
1119                         mem_share = (mem_size / config->rx_ring_num);
1120                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1121                         continue;
1122                 case 4:
1123                         mem_share = (mem_size / config->rx_ring_num);
1124                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1125                         continue;
1126                 case 5:
1127                         mem_share = (mem_size / config->rx_ring_num);
1128                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1129                         continue;
1130                 case 6:
1131                         mem_share = (mem_size / config->rx_ring_num);
1132                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1133                         continue;
1134                 case 7:
1135                         mem_share = (mem_size / config->rx_ring_num);
1136                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1137                         continue;
1138                 }
1139         }
1140         writeq(val64, &bar0->rx_queue_cfg);
1141
1142         /*
1143          * Filling Tx round robin registers
1144          * as per the number of FIFOs
1145          */
1146         switch (config->tx_fifo_num) {
1147         case 1:
1148                 val64 = 0x0000000000000000ULL;
1149                 writeq(val64, &bar0->tx_w_round_robin_0);
1150                 writeq(val64, &bar0->tx_w_round_robin_1);
1151                 writeq(val64, &bar0->tx_w_round_robin_2);
1152                 writeq(val64, &bar0->tx_w_round_robin_3);
1153                 writeq(val64, &bar0->tx_w_round_robin_4);
1154                 break;
1155         case 2:
1156                 val64 = 0x0000010000010000ULL;
1157                 writeq(val64, &bar0->tx_w_round_robin_0);
1158                 val64 = 0x0100000100000100ULL;
1159                 writeq(val64, &bar0->tx_w_round_robin_1);
1160                 val64 = 0x0001000001000001ULL;
1161                 writeq(val64, &bar0->tx_w_round_robin_2);
1162                 val64 = 0x0000010000010000ULL;
1163                 writeq(val64, &bar0->tx_w_round_robin_3);
1164                 val64 = 0x0100000000000000ULL;
1165                 writeq(val64, &bar0->tx_w_round_robin_4);
1166                 break;
1167         case 3:
1168                 val64 = 0x0001000102000001ULL;
1169                 writeq(val64, &bar0->tx_w_round_robin_0);
1170                 val64 = 0x0001020000010001ULL;
1171                 writeq(val64, &bar0->tx_w_round_robin_1);
1172                 val64 = 0x0200000100010200ULL;
1173                 writeq(val64, &bar0->tx_w_round_robin_2);
1174                 val64 = 0x0001000102000001ULL;
1175                 writeq(val64, &bar0->tx_w_round_robin_3);
1176                 val64 = 0x0001020000000000ULL;
1177                 writeq(val64, &bar0->tx_w_round_robin_4);
1178                 break;
1179         case 4:
1180                 val64 = 0x0001020300010200ULL;
1181                 writeq(val64, &bar0->tx_w_round_robin_0);
1182                 val64 = 0x0100000102030001ULL;
1183                 writeq(val64, &bar0->tx_w_round_robin_1);
1184                 val64 = 0x0200010000010203ULL;
1185                 writeq(val64, &bar0->tx_w_round_robin_2);
1186                 val64 = 0x0001020001000001ULL;
1187                 writeq(val64, &bar0->tx_w_round_robin_3);
1188                 val64 = 0x0203000100000000ULL;
1189                 writeq(val64, &bar0->tx_w_round_robin_4);
1190                 break;
1191         case 5:
1192                 val64 = 0x0001000203000102ULL;
1193                 writeq(val64, &bar0->tx_w_round_robin_0);
1194                 val64 = 0x0001020001030004ULL;
1195                 writeq(val64, &bar0->tx_w_round_robin_1);
1196                 val64 = 0x0001000203000102ULL;
1197                 writeq(val64, &bar0->tx_w_round_robin_2);
1198                 val64 = 0x0001020001030004ULL;
1199                 writeq(val64, &bar0->tx_w_round_robin_3);
1200                 val64 = 0x0001000000000000ULL;
1201                 writeq(val64, &bar0->tx_w_round_robin_4);
1202                 break;
1203         case 6:
1204                 val64 = 0x0001020304000102ULL;
1205                 writeq(val64, &bar0->tx_w_round_robin_0);
1206                 val64 = 0x0304050001020001ULL;
1207                 writeq(val64, &bar0->tx_w_round_robin_1);
1208                 val64 = 0x0203000100000102ULL;
1209                 writeq(val64, &bar0->tx_w_round_robin_2);
1210                 val64 = 0x0304000102030405ULL;
1211                 writeq(val64, &bar0->tx_w_round_robin_3);
1212                 val64 = 0x0001000200000000ULL;
1213                 writeq(val64, &bar0->tx_w_round_robin_4);
1214                 break;
1215         case 7:
1216                 val64 = 0x0001020001020300ULL;
1217                 writeq(val64, &bar0->tx_w_round_robin_0);
1218                 val64 = 0x0102030400010203ULL;
1219                 writeq(val64, &bar0->tx_w_round_robin_1);
1220                 val64 = 0x0405060001020001ULL;
1221                 writeq(val64, &bar0->tx_w_round_robin_2);
1222                 val64 = 0x0304050000010200ULL;
1223                 writeq(val64, &bar0->tx_w_round_robin_3);
1224                 val64 = 0x0102030000000000ULL;
1225                 writeq(val64, &bar0->tx_w_round_robin_4);
1226                 break;
1227         case 8:
1228                 val64 = 0x0001020300040105ULL;
1229                 writeq(val64, &bar0->tx_w_round_robin_0);
1230                 val64 = 0x0200030106000204ULL;
1231                 writeq(val64, &bar0->tx_w_round_robin_1);
1232                 val64 = 0x0103000502010007ULL;
1233                 writeq(val64, &bar0->tx_w_round_robin_2);
1234                 val64 = 0x0304010002060500ULL;
1235                 writeq(val64, &bar0->tx_w_round_robin_3);
1236                 val64 = 0x0103020400000000ULL;
1237                 writeq(val64, &bar0->tx_w_round_robin_4);
1238                 break;
1239         }
1240
1241         /* Enable all configured Tx FIFO partitions */
1242         val64 = readq(&bar0->tx_fifo_partition_0);
1243         val64 |= (TX_FIFO_PARTITION_EN);
1244         writeq(val64, &bar0->tx_fifo_partition_0);
1245
1246         /* Filling the Rx round robin registers as per the
1247          * number of Rings and steering based on QoS.
1248          */
1249         switch (config->rx_ring_num) {
1250         case 1:
1251                 val64 = 0x8080808080808080ULL;
1252                 writeq(val64, &bar0->rts_qos_steering);
1253                 break;
1254         case 2:
1255                 val64 = 0x0000010000010000ULL;
1256                 writeq(val64, &bar0->rx_w_round_robin_0);
1257                 val64 = 0x0100000100000100ULL;
1258                 writeq(val64, &bar0->rx_w_round_robin_1);
1259                 val64 = 0x0001000001000001ULL;
1260                 writeq(val64, &bar0->rx_w_round_robin_2);
1261                 val64 = 0x0000010000010000ULL;
1262                 writeq(val64, &bar0->rx_w_round_robin_3);
1263                 val64 = 0x0100000000000000ULL;
1264                 writeq(val64, &bar0->rx_w_round_robin_4);
1265
1266                 val64 = 0x8080808040404040ULL;
1267                 writeq(val64, &bar0->rts_qos_steering);
1268                 break;
1269         case 3:
1270                 val64 = 0x0001000102000001ULL;
1271                 writeq(val64, &bar0->rx_w_round_robin_0);
1272                 val64 = 0x0001020000010001ULL;
1273                 writeq(val64, &bar0->rx_w_round_robin_1);
1274                 val64 = 0x0200000100010200ULL;
1275                 writeq(val64, &bar0->rx_w_round_robin_2);
1276                 val64 = 0x0001000102000001ULL;
1277                 writeq(val64, &bar0->rx_w_round_robin_3);
1278                 val64 = 0x0001020000000000ULL;
1279                 writeq(val64, &bar0->rx_w_round_robin_4);
1280
1281                 val64 = 0x8080804040402020ULL;
1282                 writeq(val64, &bar0->rts_qos_steering);
1283                 break;
1284         case 4:
1285                 val64 = 0x0001020300010200ULL;
1286                 writeq(val64, &bar0->rx_w_round_robin_0);
1287                 val64 = 0x0100000102030001ULL;
1288                 writeq(val64, &bar0->rx_w_round_robin_1);
1289                 val64 = 0x0200010000010203ULL;
1290                 writeq(val64, &bar0->rx_w_round_robin_2);
1291                 val64 = 0x0001020001000001ULL;
1292                 writeq(val64, &bar0->rx_w_round_robin_3);
1293                 val64 = 0x0203000100000000ULL;
1294                 writeq(val64, &bar0->rx_w_round_robin_4);
1295
1296                 val64 = 0x8080404020201010ULL;
1297                 writeq(val64, &bar0->rts_qos_steering);
1298                 break;
1299         case 5:
1300                 val64 = 0x0001000203000102ULL;
1301                 writeq(val64, &bar0->rx_w_round_robin_0);
1302                 val64 = 0x0001020001030004ULL;
1303                 writeq(val64, &bar0->rx_w_round_robin_1);
1304                 val64 = 0x0001000203000102ULL;
1305                 writeq(val64, &bar0->rx_w_round_robin_2);
1306                 val64 = 0x0001020001030004ULL;
1307                 writeq(val64, &bar0->rx_w_round_robin_3);
1308                 val64 = 0x0001000000000000ULL;
1309                 writeq(val64, &bar0->rx_w_round_robin_4);
1310
1311                 val64 = 0x8080404020201008ULL;
1312                 writeq(val64, &bar0->rts_qos_steering);
1313                 break;
1314         case 6:
1315                 val64 = 0x0001020304000102ULL;
1316                 writeq(val64, &bar0->rx_w_round_robin_0);
1317                 val64 = 0x0304050001020001ULL;
1318                 writeq(val64, &bar0->rx_w_round_robin_1);
1319                 val64 = 0x0203000100000102ULL;
1320                 writeq(val64, &bar0->rx_w_round_robin_2);
1321                 val64 = 0x0304000102030405ULL;
1322                 writeq(val64, &bar0->rx_w_round_robin_3);
1323                 val64 = 0x0001000200000000ULL;
1324                 writeq(val64, &bar0->rx_w_round_robin_4);
1325
1326                 val64 = 0x8080404020100804ULL;
1327                 writeq(val64, &bar0->rts_qos_steering);
1328                 break;
1329         case 7:
1330                 val64 = 0x0001020001020300ULL;
1331                 writeq(val64, &bar0->rx_w_round_robin_0);
1332                 val64 = 0x0102030400010203ULL;
1333                 writeq(val64, &bar0->rx_w_round_robin_1);
1334                 val64 = 0x0405060001020001ULL;
1335                 writeq(val64, &bar0->rx_w_round_robin_2);
1336                 val64 = 0x0304050000010200ULL;
1337                 writeq(val64, &bar0->rx_w_round_robin_3);
1338                 val64 = 0x0102030000000000ULL;
1339                 writeq(val64, &bar0->rx_w_round_robin_4);
1340
1341                 val64 = 0x8080402010080402ULL;
1342                 writeq(val64, &bar0->rts_qos_steering);
1343                 break;
1344         case 8:
1345                 val64 = 0x0001020300040105ULL;
1346                 writeq(val64, &bar0->rx_w_round_robin_0);
1347                 val64 = 0x0200030106000204ULL;
1348                 writeq(val64, &bar0->rx_w_round_robin_1);
1349                 val64 = 0x0103000502010007ULL;
1350                 writeq(val64, &bar0->rx_w_round_robin_2);
1351                 val64 = 0x0304010002060500ULL;
1352                 writeq(val64, &bar0->rx_w_round_robin_3);
1353                 val64 = 0x0103020400000000ULL;
1354                 writeq(val64, &bar0->rx_w_round_robin_4);
1355
1356                 val64 = 0x8040201008040201ULL;
1357                 writeq(val64, &bar0->rts_qos_steering);
1358                 break;
1359         }
1360
1361         /* UDP Fix */
1362         val64 = 0;
1363         for (i = 0; i < 8; i++)
1364                 writeq(val64, &bar0->rts_frm_len_n[i]);
1365
1366         /* Set the default rts frame length for the rings configured */
1367         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1368         for (i = 0 ; i < config->rx_ring_num ; i++)
1369                 writeq(val64, &bar0->rts_frm_len_n[i]);
1370
1371         /* Set the frame length for the configured rings
1372          * desired by the user
1373          */
1374         for (i = 0; i < config->rx_ring_num; i++) {
1375                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1376                  * specified frame length steering.
1377                  * If the user provides the frame length then program
1378                  * the rts_frm_len register for those values or else
1379                  * leave it as it is.
1380                  */
1381                 if (rts_frm_len[i] != 0) {
1382                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1383                                 &bar0->rts_frm_len_n[i]);
1384                 }
1385         }
1386         
1387         /* Disable differentiated services steering logic */
1388         for (i = 0; i < 64; i++) {
1389                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1390                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1391                                 dev->name);
1392                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1393                         return FAILURE;
1394                 }
1395         }
1396
1397         /* Program statistics memory */
1398         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1399
1400         if (nic->device_type == XFRAME_II_DEVICE) {
1401                 val64 = STAT_BC(0x320);
1402                 writeq(val64, &bar0->stat_byte_cnt);
1403         }
1404
1405         /*
1406          * Initializing the sampling rate for the device to calculate the
1407          * bandwidth utilization.
1408          */
1409         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1410             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1411         writeq(val64, &bar0->mac_link_util);
1412
1413
1414         /*
1415          * Initializing the Transmit and Receive Traffic Interrupt
1416          * Scheme.
1417          */
1418         /*
1419          * TTI Initialization. Default Tx timer gets us about
1420          * 250 interrupts per sec. Continuous interrupts are enabled
1421          * by default.
1422          */
1423         if (nic->device_type == XFRAME_II_DEVICE) {
1424                 int count = (nic->config.bus_speed * 125)/2;
1425                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1426         } else {
1427
1428                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1429         }
1430         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1431             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1432             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1433                 if (use_continuous_tx_intrs)
1434                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1435         writeq(val64, &bar0->tti_data1_mem);
1436
1437         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1438             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1439             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1440         writeq(val64, &bar0->tti_data2_mem);
1441
1442         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1443         writeq(val64, &bar0->tti_command_mem);
1444
1445         /*
1446          * Once the operation completes, the Strobe bit of the command
1447          * register will be reset. We poll for this particular condition
1448          * We wait for a maximum of 500ms for the operation to complete,
1449          * if it's not complete by then we return error.
1450          */
1451         time = 0;
1452         while (TRUE) {
1453                 val64 = readq(&bar0->tti_command_mem);
1454                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1455                         break;
1456                 }
1457                 if (time > 10) {
1458                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1459                                   dev->name);
1460                         return -1;
1461                 }
1462                 msleep(50);
1463                 time++;
1464         }
1465
1466         if (nic->config.bimodal) {
1467                 int k = 0;
1468                 for (k = 0; k < config->rx_ring_num; k++) {
1469                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1470                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1471                         writeq(val64, &bar0->tti_command_mem);
1472
1473                 /*
1474                  * Once the operation completes, the Strobe bit of the command
1475                  * register will be reset. We poll for this particular condition
1476                  * We wait for a maximum of 500ms for the operation to complete,
1477                  * if it's not complete by then we return error.
1478                 */
1479                         time = 0;
1480                         while (TRUE) {
1481                                 val64 = readq(&bar0->tti_command_mem);
1482                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1483                                         break;
1484                                 }
1485                                 if (time > 10) {
1486                                         DBG_PRINT(ERR_DBG,
1487                                                 "%s: TTI init Failed\n",
1488                                         dev->name);
1489                                         return -1;
1490                                 }
1491                                 time++;
1492                                 msleep(50);
1493                         }
1494                 }
1495         } else {
1496
1497                 /* RTI Initialization */
1498                 if (nic->device_type == XFRAME_II_DEVICE) {
1499                         /*
1500                          * Programmed to generate Apprx 500 Intrs per
1501                          * second
1502                          */
1503                         int count = (nic->config.bus_speed * 125)/4;
1504                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1505                 } else {
1506                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1507                 }
1508                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1509                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1510                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1511
1512                 writeq(val64, &bar0->rti_data1_mem);
1513
1514                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1515                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1516                 if (nic->intr_type == MSI_X)
1517                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1518                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1519                 else
1520                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1521                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1522                 writeq(val64, &bar0->rti_data2_mem);
1523
1524                 for (i = 0; i < config->rx_ring_num; i++) {
1525                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1526                                         | RTI_CMD_MEM_OFFSET(i);
1527                         writeq(val64, &bar0->rti_command_mem);
1528
1529                         /*
1530                          * Once the operation completes, the Strobe bit of the
1531                          * command register will be reset. We poll for this
1532                          * particular condition. We wait for a maximum of 500ms
1533                          * for the operation to complete, if it's not complete
1534                          * by then we return error.
1535                          */
1536                         time = 0;
1537                         while (TRUE) {
1538                                 val64 = readq(&bar0->rti_command_mem);
1539                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1540                                         break;
1541                                 }
1542                                 if (time > 10) {
1543                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1544                                                   dev->name);
1545                                         return -1;
1546                                 }
1547                                 time++;
1548                                 msleep(50);
1549                         }
1550                 }
1551         }
1552
1553         /*
1554          * Initializing proper values as Pause threshold into all
1555          * the 8 Queues on Rx side.
1556          */
1557         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1558         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1559
1560         /* Disable RMAC PAD STRIPPING */
1561         add = &bar0->mac_cfg;
1562         val64 = readq(&bar0->mac_cfg);
1563         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1564         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1565         writel((u32) (val64), add);
1566         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1567         writel((u32) (val64 >> 32), (add + 4));
1568         val64 = readq(&bar0->mac_cfg);
1569
1570         /* Enable FCS stripping by adapter */
1571         add = &bar0->mac_cfg;
1572         val64 = readq(&bar0->mac_cfg);
1573         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1574         if (nic->device_type == XFRAME_II_DEVICE)
1575                 writeq(val64, &bar0->mac_cfg);
1576         else {
1577                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1578                 writel((u32) (val64), add);
1579                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1580                 writel((u32) (val64 >> 32), (add + 4));
1581         }
1582
1583         /*
1584          * Set the time value to be inserted in the pause frame
1585          * generated by xena.
1586          */
1587         val64 = readq(&bar0->rmac_pause_cfg);
1588         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1589         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1590         writeq(val64, &bar0->rmac_pause_cfg);
1591
1592         /*
1593          * Set the Threshold Limit for Generating the pause frame
1594          * If the amount of data in any Queue exceeds ratio of
1595          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1596          * pause frame is generated
1597          */
1598         val64 = 0;
1599         for (i = 0; i < 4; i++) {
1600                 val64 |=
1601                     (((u64) 0xFF00 | nic->mac_control.
1602                       mc_pause_threshold_q0q3)
1603                      << (i * 2 * 8));
1604         }
1605         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1606
1607         val64 = 0;
1608         for (i = 0; i < 4; i++) {
1609                 val64 |=
1610                     (((u64) 0xFF00 | nic->mac_control.
1611                       mc_pause_threshold_q4q7)
1612                      << (i * 2 * 8));
1613         }
1614         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1615
1616         /*
1617          * TxDMA will stop Read request if the number of read split has
1618          * exceeded the limit pointed by shared_splits
1619          */
1620         val64 = readq(&bar0->pic_control);
1621         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1622         writeq(val64, &bar0->pic_control);
1623
1624         if (nic->config.bus_speed == 266) {
1625                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1626                 writeq(0x0, &bar0->read_retry_delay);
1627                 writeq(0x0, &bar0->write_retry_delay);
1628         }
1629
1630         /*
1631          * Programming the Herc to split every write transaction
1632          * that does not start on an ADB to reduce disconnects.
1633          */
1634         if (nic->device_type == XFRAME_II_DEVICE) {
1635                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1636                         MISC_LINK_STABILITY_PRD(3);
1637                 writeq(val64, &bar0->misc_control);
1638                 val64 = readq(&bar0->pic_control2);
1639                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1640                 writeq(val64, &bar0->pic_control2);
1641         }
1642         if (strstr(nic->product_name, "CX4")) {
1643                 val64 = TMAC_AVG_IPG(0x17);
1644                 writeq(val64, &bar0->tmac_avg_ipg);
1645         }
1646
1647         return SUCCESS;
1648 }
1649 #define LINK_UP_DOWN_INTERRUPT          1
1650 #define MAC_RMAC_ERR_TIMER              2
1651
1652 static int s2io_link_fault_indication(struct s2io_nic *nic)
1653 {
1654         if (nic->intr_type != INTA)
1655                 return MAC_RMAC_ERR_TIMER;
1656         if (nic->device_type == XFRAME_II_DEVICE)
1657                 return LINK_UP_DOWN_INTERRUPT;
1658         else
1659                 return MAC_RMAC_ERR_TIMER;
1660 }
1661
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: A flag indicating whether to enable or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block.  Setting a bit in general_int_mask masks
 *  (disables) that top-level source; clearing it unmasks (enables) it.
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/*  Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* Clear the PIC bit to unmask it at the top level. */
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Unmask only the GPIO (link up/down)
				 * sub-interrupt within the PIC block. */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  MAC Interrupts */
	/*  Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1791
1792 /**
1793  *  verify_pcc_quiescent- Checks for PCC quiescent state
1794  *  Return: 1 If PCC is quiescence
1795  *          0 If PCC is not quiescence
1796  */
1797 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1798 {
1799         int ret = 0, herc;
1800         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1801         u64 val64 = readq(&bar0->adapter_status);
1802         
1803         herc = (sp->device_type == XFRAME_II_DEVICE);
1804
1805         if (flag == FALSE) {
1806                 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1807                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1808                                 ret = 1;
1809                 } else {
1810                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1811                                 ret = 1;
1812                 }
1813         } else {
1814                 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1815                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1816                              ADAPTER_STATUS_RMAC_PCC_IDLE))
1817                                 ret = 1;
1818                 } else {
1819                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1820                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1821                                 ret = 1;
1822                 }
1823         }
1824
1825         return ret;
1826 }
1827 /**
1828  *  verify_xena_quiescence - Checks whether the H/W is ready
1829  *  Description: Returns whether the H/W is ready to go or not. Depending
1830  *  on whether adapter enable bit was written or not the comparison
1831  *  differs and the calling function passes the input argument flag to
1832  *  indicate this.
1833  *  Return: 1 If xena is quiescence
1834  *          0 If Xena is not quiescence
1835  */
1836
1837 static int verify_xena_quiescence(struct s2io_nic *sp)
1838 {
1839         int  mode;
1840         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1841         u64 val64 = readq(&bar0->adapter_status);
1842         mode = s2io_verify_pci_mode(sp);
1843
1844         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1845                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1846                 return 0;
1847         }
1848         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1849         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1850                 return 0;
1851         }
1852         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1853                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1854                 return 0;
1855         }
1856         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1857                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1858                 return 0;
1859         }
1860         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1861                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1862                 return 0;
1863         }
1864         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1865                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1866                 return 0;
1867         }
1868         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1869                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1870                 return 0;
1871         }
1872         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1873                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1874                 return 0;
1875         }
1876
1877         /*
1878          * In PCI 33 mode, the P_PLL is not used, and therefore,
1879          * the the P_PLL_LOCK bit in the adapter_status register will
1880          * not be asserted.
1881          */
1882         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1883                 sp->device_type == XFRAME_II_DEVICE && mode !=
1884                 PCI_MODE_PCI_33) {
1885                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1886                 return 0;
1887         }
1888         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1889                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1890                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1891                 return 0;
1892         }
1893         return 1;
1894 }
1895
1896 /**
1897  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1898  * @sp: Pointer to device specifc structure
1899  * Description :
1900  * New procedure to clear mac address reading  problems on Alpha platforms
1901  *
1902  */
1903
1904 static void fix_mac_address(struct s2io_nic * sp)
1905 {
1906         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1907         u64 val64;
1908         int i = 0;
1909
1910         while (fix_mac[i] != END_SIGN) {
1911                 writeq(fix_mac[i++], &bar0->gpio_control);
1912                 udelay(10);
1913                 val64 = readq(&bar0->gpio_control);
1914         }
1915 }
1916
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each receive ring controller at the first Rx
		 * descriptor block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the RxD backoff interval field with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the vlan_tag_strip=0 module parameter: keep VLAN tags. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection.
	 * NOTE(review): the code CLEARS ADAPTER_ECC_EN here, which reads
	 * as disabling the bit — confirm the bit's polarity in s2io.h. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the descriptor list belongs to (supplies max_txds).
 * @txdlp: first TxD of the descriptor list to clean.
 * @get_off: descriptor offset (not used in this function).
 * Description:
 * Unmaps every DMA buffer referenced by the descriptor list, clears the
 * whole list, and returns the skb that was attached to it (via the
 * Host_Control field), or NULL if none was attached.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
					TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* If a UFO in-band descriptor was queued first, unmap it and
	 * step past it to reach the real data descriptor. */
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
			txds->Buffer_Pointer, sizeof(u64),
			PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *) ((unsigned long)
			txds->Host_Control);
	if (!skb) {
		/* No skb attached: just wipe the descriptor list. */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear (non-paged) part of the skb. */
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		/* One descriptor per page fragment follows the linear one;
		 * a zero Buffer_Pointer terminates early. */
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
					txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
	return(skb);
}
2089
2090 /**
2091  *  free_tx_buffers - Free all queued Tx buffers
2092  *  @nic : device private variable.
2093  *  Description:
2094  *  Free all queued Tx buffers.
2095  *  Return Value: void
2096 */
2097
2098 static void free_tx_buffers(struct s2io_nic *nic)
2099 {
2100         struct net_device *dev = nic->dev;
2101         struct sk_buff *skb;
2102         struct TxD *txdp;
2103         int i, j;
2104         struct mac_info *mac_control;
2105         struct config_param *config;
2106         int cnt = 0;
2107
2108         mac_control = &nic->mac_control;
2109         config = &nic->config;
2110
2111         for (i = 0; i < config->tx_fifo_num; i++) {
2112                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2113                         txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2114                             list_virt_addr;
2115                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2116                         if (skb) {
2117                                 dev_kfree_skb(skb);
2118                                 cnt++;
2119                         }
2120                 }
2121                 DBG_PRINT(INTR_DBG,
2122                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2123                           dev->name, cnt, i);
2124                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2125                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2126         }
2127 }
2128
2129 /**
2130  *   stop_nic -  To stop the nic
2131  *   @nic ; device private variable.
2132  *   Description:
2133  *   This function does exactly the opposite of what the start_nic()
2134  *   function does. This function is called to stop the device.
2135  *   Return Value:
2136  *   void.
2137  */
2138
2139 static void stop_nic(struct s2io_nic *nic)
2140 {
2141         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2142         register u64 val64 = 0;
2143         u16 interruptible;
2144         struct mac_info *mac_control;
2145         struct config_param *config;
2146
2147         mac_control = &nic->mac_control;
2148         config = &nic->config;
2149
2150         /*  Disable all interrupts */
2151         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2152         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2153         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2154         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2155
2156         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2157         val64 = readq(&bar0->adapter_control);
2158         val64 &= ~(ADAPTER_CNTL_EN);
2159         writeq(val64, &bar0->adapter_control);
2160 }
2161
2162 static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2163                                 sk_buff *skb)
2164 {
2165         struct net_device *dev = nic->dev;
2166         struct sk_buff *frag_list;
2167         void *tmp;
2168
2169         /* Buffer-1 receives L3/L4 headers */
2170         ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2171                         (nic->pdev, skb->data, l3l4hdr_size + 4,
2172                         PCI_DMA_FROMDEVICE);
2173
2174         /* skb_shinfo(skb)->frag_list will have L4 data payload */
2175         skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2176         if (skb_shinfo(skb)->frag_list == NULL) {
2177                 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2178                 return -ENOMEM ;
2179         }
2180         frag_list = skb_shinfo(skb)->frag_list;
2181         skb->truesize += frag_list->truesize;
2182         frag_list->next = NULL;
2183         tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2184         frag_list->data = tmp;
2185         frag_list->tail = tmp;
2186
2187         /* Buffer-2 receives L4 data payload */
2188         ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2189                                 frag_list->data, dev->mtu,
2190                                 PCI_DMA_FROMDEVICE);
2191         rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2192         rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2193
2194         return SUCCESS;
2195 }
2196
2197 /**
2198  *  fill_rx_buffers - Allocates the Rx side skbs
2199  *  @nic:  device private variable
2200  *  @ring_no: ring number
2201  *  Description:
2202  *  The function allocates Rx side skbs and puts the physical
2203  *  address of these buffers into the RxD buffer pointers, so that the NIC
2204  *  can DMA the received frame into these locations.
2205  *  The NIC supports 3 receive modes, viz
2206  *  1. single buffer,
2207  *  2. three buffer and
2208  *  3. Five buffer modes.
2209  *  Each mode defines how many fragments the received frame will be split
2210  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2211  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2212  *  is split into 3 fragments. As of now only single buffer mode is
2213  *  supported.
2214  *   Return Value:
2215  *  SUCCESS on success or an appropriate -ve value on failure.
2216  */
2217
2218 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2219 {
2220         struct net_device *dev = nic->dev;
2221         struct sk_buff *skb;
2222         struct RxD_t *rxdp;
2223         int off, off1, size, block_no, block_no1;
2224         u32 alloc_tab = 0;
2225         u32 alloc_cnt;
2226         struct mac_info *mac_control;
2227         struct config_param *config;
2228         u64 tmp;
2229         struct buffAdd *ba;
2230         unsigned long flags;
2231         struct RxD_t *first_rxdp = NULL;
2232
2233         mac_control = &nic->mac_control;
2234         config = &nic->config;
2235         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2236             atomic_read(&nic->rx_bufs_left[ring_no]);
2237
2238         block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2239         off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2240         while (alloc_tab < alloc_cnt) {
2241                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2242                     block_index;
2243                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2244
2245                 rxdp = mac_control->rings[ring_no].
2246                                 rx_blocks[block_no].rxds[off].virt_addr;
2247
2248                 if ((block_no == block_no1) && (off == off1) &&
2249                                         (rxdp->Host_Control)) {
2250                         DBG_PRINT(INTR_DBG, "%s: Get and Put",
2251                                   dev->name);
2252                         DBG_PRINT(INTR_DBG, " info equated\n");
2253                         goto end;
2254                 }
2255                 if (off && (off == rxd_count[nic->rxd_mode])) {
2256                         mac_control->rings[ring_no].rx_curr_put_info.
2257                             block_index++;
2258                         if (mac_control->rings[ring_no].rx_curr_put_info.
2259                             block_index == mac_control->rings[ring_no].
2260                                         block_count)
2261                                 mac_control->rings[ring_no].rx_curr_put_info.
2262                                         block_index = 0;
2263                         block_no = mac_control->rings[ring_no].
2264                                         rx_curr_put_info.block_index;
2265                         if (off == rxd_count[nic->rxd_mode])
2266                                 off = 0;
2267                         mac_control->rings[ring_no].rx_curr_put_info.
2268                                 offset = off;
2269                         rxdp = mac_control->rings[ring_no].
2270                                 rx_blocks[block_no].block_virt_addr;
2271                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2272                                   dev->name, rxdp);
2273                 }
2274                 if(!napi) {
2275                         spin_lock_irqsave(&nic->put_lock, flags);
2276                         mac_control->rings[ring_no].put_pos =
2277                         (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2278                         spin_unlock_irqrestore(&nic->put_lock, flags);
2279                 } else {
2280                         mac_control->rings[ring_no].put_pos =
2281                         (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2282                 }
2283                 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2284                         ((nic->rxd_mode >= RXD_MODE_3A) &&
2285                                 (rxdp->Control_2 & BIT(0)))) {
2286                         mac_control->rings[ring_no].rx_curr_put_info.
2287                                         offset = off;
2288                         goto end;
2289                 }
2290                 /* calculate size of skb based on ring mode */
2291                 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2292                                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2293                 if (nic->rxd_mode == RXD_MODE_1)
2294                         size += NET_IP_ALIGN;
2295                 else if (nic->rxd_mode == RXD_MODE_3B)
2296                         size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2297                 else
2298                         size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2299
2300                 /* allocate skb */
2301                 skb = dev_alloc_skb(size);
2302                 if(!skb) {
2303                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2304                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2305                         if (first_rxdp) {
2306                                 wmb();
2307                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2308                         }
2309                         return -ENOMEM ;
2310                 }
2311                 if (nic->rxd_mode == RXD_MODE_1) {
2312                         /* 1 buffer mode - normal operation mode */
2313                         memset(rxdp, 0, sizeof(struct RxD1));
2314                         skb_reserve(skb, NET_IP_ALIGN);
2315                         ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2316                             (nic->pdev, skb->data, size - NET_IP_ALIGN,
2317                                 PCI_DMA_FROMDEVICE);
2318                         rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2319
2320                 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2321                         /*
2322                          * 2 or 3 buffer mode -
2323                          * Both 2 buffer mode and 3 buffer mode provides 128
2324                          * byte aligned receive buffers.
2325                          *
2326                          * 3 buffer mode provides header separation where in
2327                          * skb->data will have L3/L4 headers where as
2328                          * skb_shinfo(skb)->frag_list will have the L4 data
2329                          * payload
2330                          */
2331
2332                         memset(rxdp, 0, sizeof(struct RxD3));
2333                         ba = &mac_control->rings[ring_no].ba[block_no][off];
2334                         skb_reserve(skb, BUF0_LEN);
2335                         tmp = (u64)(unsigned long) skb->data;
2336                         tmp += ALIGN_SIZE;
2337                         tmp &= ~ALIGN_SIZE;
2338                         skb->data = (void *) (unsigned long)tmp;
2339                         skb->tail = (void *) (unsigned long)tmp;
2340
2341                         if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2342                                 ((struct RxD3*)rxdp)->Buffer0_ptr =
2343                                    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2344                                            PCI_DMA_FROMDEVICE);
2345                         else
2346                                 pci_dma_sync_single_for_device(nic->pdev,
2347                                     (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2348                                     BUF0_LEN, PCI_DMA_FROMDEVICE);
2349                         rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2350                         if (nic->rxd_mode == RXD_MODE_3B) {
2351                                 /* Two buffer mode */
2352
2353                                 /*
2354                                  * Buffer2 will have L3/L4 header plus
2355                                  * L4 payload
2356                                  */
2357                                 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2358                                 (nic->pdev, skb->data, dev->mtu + 4,
2359                                                 PCI_DMA_FROMDEVICE);
2360
2361                                 /* Buffer-1 will be dummy buffer. Not used */
2362                                 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2363                                         ((struct RxD3*)rxdp)->Buffer1_ptr =
2364                                                 pci_map_single(nic->pdev,
2365                                                 ba->ba_1, BUF1_LEN,
2366                                                 PCI_DMA_FROMDEVICE);
2367                                 }
2368                                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2369                                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2370                                                                 (dev->mtu + 4);
2371                         } else {
2372                                 /* 3 buffer mode */
2373                                 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2374                                         dev_kfree_skb_irq(skb);
2375                                         if (first_rxdp) {
2376                                                 wmb();
2377                                                 first_rxdp->Control_1 |=
2378                                                         RXD_OWN_XENA;
2379                                         }
2380                                         return -ENOMEM ;
2381                                 }
2382                         }
2383                         rxdp->Control_2 |= BIT(0);
2384                 }
2385                 rxdp->Host_Control = (unsigned long) (skb);
2386                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2387                         rxdp->Control_1 |= RXD_OWN_XENA;
2388                 off++;
2389                 if (off == (rxd_count[nic->rxd_mode] + 1))
2390                         off = 0;
2391                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2392
2393                 rxdp->Control_2 |= SET_RXD_MARKER;
2394                 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2395                         if (first_rxdp) {
2396                                 wmb();
2397                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2398                         }
2399                         first_rxdp = rxdp;
2400                 }
2401                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2402                 alloc_tab++;
2403         }
2404
2405       end:
2406         /* Transfer ownership of first descriptor to adapter just before
2407          * exiting. Before that, use memory barrier so that ownership
2408          * and other fields are seen by adapter correctly.
2409          */
2410         if (first_rxdp) {
2411                 wmb();
2412                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2413         }
2414
2415         return SUCCESS;
2416 }
2417
2418 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2419 {
2420         struct net_device *dev = sp->dev;
2421         int j;
2422         struct sk_buff *skb;
2423         struct RxD_t *rxdp;
2424         struct mac_info *mac_control;
2425         struct buffAdd *ba;
2426
2427         mac_control = &sp->mac_control;
2428         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2429                 rxdp = mac_control->rings[ring_no].
2430                                 rx_blocks[blk].rxds[j].virt_addr;
2431                 skb = (struct sk_buff *)
2432                         ((unsigned long) rxdp->Host_Control);
2433                 if (!skb) {
2434                         continue;
2435                 }
2436                 if (sp->rxd_mode == RXD_MODE_1) {
2437                         pci_unmap_single(sp->pdev, (dma_addr_t)
2438                                  ((struct RxD1*)rxdp)->Buffer0_ptr,
2439                                  dev->mtu +
2440                                  HEADER_ETHERNET_II_802_3_SIZE
2441                                  + HEADER_802_2_SIZE +
2442                                  HEADER_SNAP_SIZE,
2443                                  PCI_DMA_FROMDEVICE);
2444                         memset(rxdp, 0, sizeof(struct RxD1));
2445                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2446                         ba = &mac_control->rings[ring_no].
2447                                 ba[blk][j];
2448                         pci_unmap_single(sp->pdev, (dma_addr_t)
2449                                  ((struct RxD3*)rxdp)->Buffer0_ptr,
2450                                  BUF0_LEN,
2451                                  PCI_DMA_FROMDEVICE);
2452                         pci_unmap_single(sp->pdev, (dma_addr_t)
2453                                  ((struct RxD3*)rxdp)->Buffer1_ptr,
2454                                  BUF1_LEN,
2455                                  PCI_DMA_FROMDEVICE);
2456                         pci_unmap_single(sp->pdev, (dma_addr_t)
2457                                  ((struct RxD3*)rxdp)->Buffer2_ptr,
2458                                  dev->mtu + 4,
2459                                  PCI_DMA_FROMDEVICE);
2460                         memset(rxdp, 0, sizeof(struct RxD3));
2461                 } else {
2462                         pci_unmap_single(sp->pdev, (dma_addr_t)
2463                                 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2464                                 PCI_DMA_FROMDEVICE);
2465                         pci_unmap_single(sp->pdev, (dma_addr_t)
2466                                 ((struct RxD3*)rxdp)->Buffer1_ptr,
2467                                 l3l4hdr_size + 4,
2468                                 PCI_DMA_FROMDEVICE);
2469                         pci_unmap_single(sp->pdev, (dma_addr_t)
2470                                 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2471                                 PCI_DMA_FROMDEVICE);
2472                         memset(rxdp, 0, sizeof(struct RxD3));
2473                 }
2474                 dev_kfree_skb(skb);
2475                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2476         }
2477 }
2478
2479 /**
2480  *  free_rx_buffers - Frees all Rx buffers
2481  *  @sp: device private variable.
2482  *  Description:
2483  *  This function will free all Rx buffers allocated by host.
2484  *  Return Value:
2485  *  NONE.
2486  */
2487
2488 static void free_rx_buffers(struct s2io_nic *sp)
2489 {
2490         struct net_device *dev = sp->dev;
2491         int i, blk = 0, buf_cnt = 0;
2492         struct mac_info *mac_control;
2493         struct config_param *config;
2494
2495         mac_control = &sp->mac_control;
2496         config = &sp->config;
2497
2498         for (i = 0; i < config->rx_ring_num; i++) {
2499                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2500                         free_rxd_blk(sp,i,blk);
2501
2502                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2503                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2504                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2505                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2506                 atomic_set(&sp->rx_bufs_left[i], 0);
2507                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2508                           dev->name, buf_cnt, i);
2509         }
2510 }
2511
2512 /**
2513  * s2io_poll - Rx interrupt handler for NAPI support
2514  * @dev : pointer to the device structure.
2515  * @budget : The number of packets that were budgeted to be processed
2516  * during  one pass through the 'Poll" function.
2517  * Description:
2518  * Comes into picture only if NAPI support has been incorporated. It does
2519  * the same thing that rx_intr_handler does, but not in a interrupt context
2520  * also It will process only a given number of packets.
2521  * Return value:
2522  * 0 on success and 1 if there are No Rx packets to be processed.
2523  */
2524
2525 static int s2io_poll(struct net_device *dev, int *budget)
2526 {
2527         struct s2io_nic *nic = dev->priv;
2528         int pkt_cnt = 0, org_pkts_to_process;
2529         struct mac_info *mac_control;
2530         struct config_param *config;
2531         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2532         int i;
2533
2534         atomic_inc(&nic->isr_cnt);
2535         mac_control = &nic->mac_control;
2536         config = &nic->config;
2537
2538         nic->pkts_to_process = *budget;
2539         if (nic->pkts_to_process > dev->quota)
2540                 nic->pkts_to_process = dev->quota;
2541         org_pkts_to_process = nic->pkts_to_process;
2542
2543         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2544         readl(&bar0->rx_traffic_int);
2545
2546         for (i = 0; i < config->rx_ring_num; i++) {
2547                 rx_intr_handler(&mac_control->rings[i]);
2548                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2549                 if (!nic->pkts_to_process) {
2550                         /* Quota for the current iteration has been met */
2551                         goto no_rx;
2552                 }
2553         }
2554         if (!pkt_cnt)
2555                 pkt_cnt = 1;
2556
2557         dev->quota -= pkt_cnt;
2558         *budget -= pkt_cnt;
2559         netif_rx_complete(dev);
2560
2561         for (i = 0; i < config->rx_ring_num; i++) {
2562                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2563                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2564                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2565                         break;
2566                 }
2567         }
2568         /* Re enable the Rx interrupts. */
2569         writeq(0x0, &bar0->rx_traffic_mask);
2570         readl(&bar0->rx_traffic_mask);
2571         atomic_dec(&nic->isr_cnt);
2572         return 0;
2573
2574 no_rx:
2575         dev->quota -= pkt_cnt;
2576         *budget -= pkt_cnt;
2577
2578         for (i = 0; i < config->rx_ring_num; i++) {
2579                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2580                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2581                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2582                         break;
2583                 }
2584         }
2585         atomic_dec(&nic->isr_cnt);
2586         return 1;
2587 }
2588
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * Called by the netpoll infrastructure when interrupts are disabled, to
 * service the interface for in-kernel network consumers such as remote
 * consoles and crash-dump-over-network (e.g. netdump).  Runs the Tx and
 * Rx handlers by hand with the device IRQ masked.
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	const u64 all_ones = 0xFFFFFFFFFFFFFFFFULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int i;

	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge every pending Rx/Tx traffic interrupt */
	writeq(all_ones, &bar0->rx_traffic_int);
	writeq(all_ones, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* Replenish the Rx descriptors consumed above */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
}
#endif
2640
2641 /**
2642  *  rx_intr_handler - Rx interrupt handler
2643  *  @nic: device private variable.
2644  *  Description:
2645  *  If the interrupt is because of a received frame or if the
2646  *  receive ring contains fresh as yet un-processed frames,this function is
2647  *  called. It picks out the RxD at which place the last Rx processing had
2648  *  stopped and sends the skb to the OSM's Rx handler and then increments
2649  *  the offset.
2650  *  Return Value:
2651  *  NONE.
2652  */
2653 static void rx_intr_handler(struct ring_info *ring_data)
2654 {
2655         struct s2io_nic *nic = ring_data->nic;
2656         struct net_device *dev = (struct net_device *) nic->dev;
2657         int get_block, put_block, put_offset;
2658         struct rx_curr_get_info get_info, put_info;
2659         struct RxD_t *rxdp;
2660         struct sk_buff *skb;
2661         int pkt_cnt = 0;
2662         int i;
2663
2664         spin_lock(&nic->rx_lock);
2665         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2666                 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2667                           __FUNCTION__, dev->name);
2668                 spin_unlock(&nic->rx_lock);
2669                 return;
2670         }
2671
2672         get_info = ring_data->rx_curr_get_info;
2673         get_block = get_info.block_index;
2674         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2675         put_block = put_info.block_index;
2676         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2677         if (!napi) {
2678                 spin_lock(&nic->put_lock);
2679                 put_offset = ring_data->put_pos;
2680                 spin_unlock(&nic->put_lock);
2681         } else
2682                 put_offset = ring_data->put_pos;
2683
2684         while (RXD_IS_UP2DT(rxdp)) {
2685                 /*
2686                  * If your are next to put index then it's
2687                  * FIFO full condition
2688                  */
2689                 if ((get_block == put_block) &&
2690                     (get_info.offset + 1) == put_info.offset) {
2691                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2692                         break;
2693                 }
2694                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2695                 if (skb == NULL) {
2696                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2697                                   dev->name);
2698                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2699                         spin_unlock(&nic->rx_lock);
2700                         return;
2701                 }
2702                 if (nic->rxd_mode == RXD_MODE_1) {
2703                         pci_unmap_single(nic->pdev, (dma_addr_t)
2704                                  ((struct RxD1*)rxdp)->Buffer0_ptr,
2705                                  dev->mtu +
2706                                  HEADER_ETHERNET_II_802_3_SIZE +
2707                                  HEADER_802_2_SIZE +
2708                                  HEADER_SNAP_SIZE,
2709                                  PCI_DMA_FROMDEVICE);
2710                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2711                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2712                                  ((struct RxD3*)rxdp)->Buffer0_ptr,
2713                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2714                         pci_unmap_single(nic->pdev, (dma_addr_t)
2715                                  ((struct RxD3*)rxdp)->Buffer2_ptr,
2716                                  dev->mtu + 4,
2717                                  PCI_DMA_FROMDEVICE);
2718                 } else {
2719                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2720                                          ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2721                                          PCI_DMA_FROMDEVICE);
2722                         pci_unmap_single(nic->pdev, (dma_addr_t)
2723                                          ((struct RxD3*)rxdp)->Buffer1_ptr,
2724                                          l3l4hdr_size + 4,
2725                                          PCI_DMA_FROMDEVICE);
2726                         pci_unmap_single(nic->pdev, (dma_addr_t)
2727                                          ((struct RxD3*)rxdp)->Buffer2_ptr,
2728                                          dev->mtu, PCI_DMA_FROMDEVICE);
2729                 }
2730                 prefetch(skb->data);
2731                 rx_osm_handler(ring_data, rxdp);
2732                 get_info.offset++;
2733                 ring_data->rx_curr_get_info.offset = get_info.offset;
2734                 rxdp = ring_data->rx_blocks[get_block].
2735                                 rxds[get_info.offset].virt_addr;
2736                 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2737                         get_info.offset = 0;
2738                         ring_data->rx_curr_get_info.offset = get_info.offset;
2739                         get_block++;
2740                         if (get_block == ring_data->block_count)
2741                                 get_block = 0;
2742                         ring_data->rx_curr_get_info.block_index = get_block;
2743                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2744                 }
2745
2746                 nic->pkts_to_process -= 1;
2747                 if ((napi) && (!nic->pkts_to_process))
2748                         break;
2749                 pkt_cnt++;
2750                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2751                         break;
2752         }
2753         if (nic->lro) {
2754                 /* Clear all LRO sessions before exiting */
2755                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2756                         struct lro *lro = &nic->lro0_n[i];
2757                         if (lro->in_use) {
2758                                 update_L3L4_header(nic, lro);
2759                                 queue_rx_frame(lro->parent);
2760                                 clear_lro_session(lro);
2761                         }
2762                 }
2763         }
2764
2765         spin_unlock(&nic->rx_lock);
2766 }
2767
2768 /**
2769  *  tx_intr_handler - Transmit interrupt handler
2770  *  @nic : device private variable
2771  *  Description:
2772  *  If an interrupt was raised to indicate DMA complete of the
2773  *  Tx packet, this function is called. It identifies the last TxD
2774  *  whose buffer was freed and frees all skbs whose data have already
2775  *  DMA'ed into the NICs internal memory.
2776  *  Return Value:
2777  *  NONE
2778  */
2779
2780 static void tx_intr_handler(struct fifo_info *fifo_data)
2781 {
2782         struct s2io_nic *nic = fifo_data->nic;
2783         struct net_device *dev = (struct net_device *) nic->dev;
2784         struct tx_curr_get_info get_info, put_info;
2785         struct sk_buff *skb;
2786         struct TxD *txdlp;
2787
2788         get_info = fifo_data->tx_curr_get_info;
2789         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2790         txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2791             list_virt_addr;
2792         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2793                (get_info.offset != put_info.offset) &&
2794                (txdlp->Host_Control)) {
2795                 /* Check for TxD errors */
2796                 if (txdlp->Control_1 & TXD_T_CODE) {
2797                         unsigned long long err;
2798                         err = txdlp->Control_1 & TXD_T_CODE;
2799                         if (err & 0x1) {
2800                                 nic->mac_control.stats_info->sw_stat.
2801                                                 parity_err_cnt++;
2802                         }
2803                         if ((err >> 48) == 0xA) {
2804                                 DBG_PRINT(TX_DBG, "TxD returned due \
2805                                                 to loss of link\n");
2806                         }
2807                         else {
2808                                 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2809                         }
2810                 }
2811
2812                 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2813                 if (skb == NULL) {
2814                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2815                         __FUNCTION__);
2816                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2817                         return;
2818                 }
2819
2820                 /* Updating the statistics block */
2821                 nic->stats.tx_bytes += skb->len;
2822                 dev_kfree_skb_irq(skb);
2823
2824                 get_info.offset++;
2825                 if (get_info.offset == get_info.fifo_len + 1)
2826                         get_info.offset = 0;
2827                 txdlp = (struct TxD *) fifo_data->list_info
2828                     [get_info.offset].list_virt_addr;
2829                 fifo_data->tx_curr_get_info.offset =
2830                     get_info.offset;
2831         }
2832
2833         spin_lock(&nic->tx_lock);
2834         if (netif_queue_stopped(dev))
2835                 netif_wake_queue(dev);
2836         spin_unlock(&nic->tx_lock);
2837 }
2838
2839 /**
2840  *  s2io_mdio_write - Function to write in to MDIO registers
2841  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2842  *  @addr     : address value
2843  *  @value    : data value
2844  *  @dev      : pointer to net_device structure
2845  *  Description:
2846  *  This function is used to write values to the MDIO registers
2847  *  NONE
2848  */
2849 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2850 {
2851         u64 val64 = 0x0;
2852         struct s2io_nic *sp = dev->priv;
2853         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2854
2855         //address transaction
2856         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2857                         | MDIO_MMD_DEV_ADDR(mmd_type)
2858                         | MDIO_MMS_PRT_ADDR(0x0);
2859         writeq(val64, &bar0->mdio_control);
2860         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2861         writeq(val64, &bar0->mdio_control);
2862         udelay(100);
2863
2864         //Data transaction
2865         val64 = 0x0;
2866         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2867                         | MDIO_MMD_DEV_ADDR(mmd_type)
2868                         | MDIO_MMS_PRT_ADDR(0x0)
2869                         | MDIO_MDIO_DATA(value)
2870                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
2871         writeq(val64, &bar0->mdio_control);
2872         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2873         writeq(val64, &bar0->mdio_control);
2874         udelay(100);
2875
2876         val64 = 0x0;
2877         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2878         | MDIO_MMD_DEV_ADDR(mmd_type)
2879         | MDIO_MMS_PRT_ADDR(0x0)
2880         | MDIO_OP(MDIO_OP_READ_TRANS);
2881         writeq(val64, &bar0->mdio_control);
2882         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2883         writeq(val64, &bar0->mdio_control);
2884         udelay(100);
2885
2886 }
2887
2888 /**
2889  *  s2io_mdio_read - Function to write in to MDIO registers
2890  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2891  *  @addr     : address value
2892  *  @dev      : pointer to net_device structure
2893  *  Description:
2894  *  This function is used to read values to the MDIO registers
2895  *  NONE
2896  */
2897 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2898 {
2899         u64 val64 = 0x0;
2900         u64 rval64 = 0x0;
2901         struct s2io_nic *sp = dev->priv;
2902         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2903
2904         /* address transaction */
2905         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2906                         | MDIO_MMD_DEV_ADDR(mmd_type)
2907                         | MDIO_MMS_PRT_ADDR(0x0);
2908         writeq(val64, &bar0->mdio_control);
2909         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2910         writeq(val64, &bar0->mdio_control);
2911         udelay(100);
2912
2913         /* Data transaction */
2914         val64 = 0x0;
2915         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2916                         | MDIO_MMD_DEV_ADDR(mmd_type)
2917                         | MDIO_MMS_PRT_ADDR(0x0)
2918                         | MDIO_OP(MDIO_OP_READ_TRANS);
2919         writeq(val64, &bar0->mdio_control);
2920         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2921         writeq(val64, &bar0->mdio_control);
2922         udelay(100);
2923
2924         /* Read the value from regs */
2925         rval64 = readq(&bar0->mdio_control);
2926         rval64 = rval64 & 0xFFFF0000;
2927         rval64 = rval64 >> 16;
2928         return rval64;
2929 }
2930 /**
2931  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
2932  *  @counter      : couter value to be updated
2933  *  @flag         : flag to indicate the status
2934  *  @type         : counter type
2935  *  Description:
2936  *  This function is to check the status of the xpak counters value
2937  *  NONE
2938  */
2939
2940 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2941 {
2942         u64 mask = 0x3;
2943         u64 val64;
2944         int i;
2945         for(i = 0; i <index; i++)
2946                 mask = mask << 0x2;
2947
2948         if(flag > 0)
2949         {
2950                 *counter = *counter + 1;
2951                 val64 = *regs_stat & mask;
2952                 val64 = val64 >> (index * 0x2);
2953                 val64 = val64 + 1;
2954                 if(val64 == 3)
2955                 {
2956                         switch(type)
2957                         {
2958                         case 1:
2959                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2960                                           "service. Excessive temperatures may "
2961                                           "result in premature transceiver "
2962                                           "failure \n");
2963                         break;
2964                         case 2:
2965                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2966                                           "service Excessive bias currents may "
2967                                           "indicate imminent laser diode "
2968                                           "failure \n");
2969                         break;
2970                         case 3:
2971                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2972                                           "service Excessive laser output "
2973                                           "power may saturate far-end "
2974                                           "receiver\n");
2975                         break;
2976                         default:
2977                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
2978                                           "type \n");
2979                         }
2980                         val64 = 0x0;
2981                 }
2982                 val64 = val64 << (index * 0x2);
2983                 *regs_stat = (*regs_stat & (~mask)) | (val64);
2984
2985         } else {
2986                 *regs_stat = *regs_stat & (~mask);
2987         }
2988 }
2989
2990 /**
2991  *  s2io_updt_xpak_counter - Function to update the xpak counters
2992  *  @dev         : pointer to net_device struct
2993  *  Description:
2994  *  This function is to upate the status of the xpak counters value
2995  *  NONE
2996  */
2997 static void s2io_updt_xpak_counter(struct net_device *dev)
2998 {
2999         u16 flag  = 0x0;
3000         u16 type  = 0x0;
3001         u16 val16 = 0x0;
3002         u64 val64 = 0x0;
3003         u64 addr  = 0x0;
3004
3005         struct s2io_nic *sp = dev->priv;
3006         struct stat_block *stat_info = sp->mac_control.stats_info;
3007
3008         /* Check the communication with the MDIO slave */
3009         addr = 0x0000;
3010         val64 = 0x0;
3011         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3012         if((val64 == 0xFFFF) || (val64 == 0x0000))
3013         {
3014                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3015                           "Returned %llx\n", (unsigned long long)val64);
3016                 return;
3017         }
3018
3019         /* Check for the expecte value of 2040 at PMA address 0x0000 */
3020         if(val64 != 0x2040)
3021         {
3022                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3023                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3024                           (unsigned long long)val64);
3025                 return;
3026         }
3027
3028         /* Loading the DOM register to MDIO register */
3029         addr = 0xA100;
3030         s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3031         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3032
3033         /* Reading the Alarm flags */
3034         addr = 0xA070;
3035         val64 = 0x0;
3036         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3037
3038         flag = CHECKBIT(val64, 0x7);
3039         type = 1;
3040         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3041                                 &stat_info->xpak_stat.xpak_regs_stat,
3042                                 0x0, flag, type);
3043
3044         if(CHECKBIT(val64, 0x6))
3045                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3046
3047         flag = CHECKBIT(val64, 0x3);
3048         type = 2;
3049         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3050                                 &stat_info->xpak_stat.xpak_regs_stat,
3051                                 0x2, flag, type);
3052
3053         if(CHECKBIT(val64, 0x2))
3054                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3055
3056         flag = CHECKBIT(val64, 0x1);
3057         type = 3;
3058         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3059                                 &stat_info->xpak_stat.xpak_regs_stat,
3060                                 0x4, flag, type);
3061
3062         if(CHECKBIT(val64, 0x0))
3063                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3064
3065         /* Reading the Warning flags */
3066         addr = 0xA074;
3067         val64 = 0x0;
3068         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3069
3070         if(CHECKBIT(val64, 0x7))
3071                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3072
3073         if(CHECKBIT(val64, 0x6))
3074                 stat_info->xpak_stat.warn_transceiver_temp_low++;
3075
3076         if(CHECKBIT(val64, 0x3))
3077                 stat_info->xpak_stat.warn_laser_bias_current_high++;
3078
3079         if(CHECKBIT(val64, 0x2))
3080                 stat_info->xpak_stat.warn_laser_bias_current_low++;
3081
3082         if(CHECKBIT(val64, 0x1))
3083                 stat_info->xpak_stat.warn_laser_output_power_high++;
3084
3085         if(CHECKBIT(val64, 0x0))
3086                 stat_info->xpak_stat.warn_laser_output_power_low++;
3087 }
3088
3089 /**
3090  *  alarm_intr_handler - Alarm Interrrupt handler
3091  *  @nic: device private variable
3092  *  Description: If the interrupt was neither because of Rx packet or Tx
3093  *  complete, this function is called. If the interrupt was to indicate
3094  *  a loss of link, the OSM link status handler is invoked for any other
3095  *  alarm interrupt the block that raised the interrupt is displayed
3096  *  and a H/W reset is issued.
3097  *  Return Value:
3098  *  NONE
3099 */
3100
3101 static void alarm_intr_handler(struct s2io_nic *nic)
3102 {
3103         struct net_device *dev = (struct net_device *) nic->dev;
3104         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3105         register u64 val64 = 0, err_reg = 0;
3106         u64 cnt;
3107         int i;
3108         if (atomic_read(&nic->card_state) == CARD_DOWN)
3109                 return;
3110         nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3111         /* Handling the XPAK counters update */
3112         if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3113                 /* waiting for an hour */
3114                 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3115         } else {
3116                 s2io_updt_xpak_counter(dev);
3117                 /* reset the count to zero */
3118                 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3119         }
3120
3121         /* Handling link status change error Intr */
3122         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3123                 err_reg = readq(&bar0->mac_rmac_err_reg);
3124                 writeq(err_reg, &bar0->mac_rmac_err_reg);
3125                 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3126                         schedule_work(&nic->set_link_task);
3127                 }
3128         }
3129
3130         /* Handling Ecc errors */
3131         val64 = readq(&bar0->mc_err_reg);
3132         writeq(val64, &bar0->mc_err_reg);
3133         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3134                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3135                         nic->mac_control.stats_info->sw_stat.
3136                                 double_ecc_errs++;
3137                         DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3138                                   dev->name);
3139                         DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3140                         if (nic->device_type != XFRAME_II_DEVICE) {
3141                                 /* Reset XframeI only if critical error */
3142                                 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3143                                              MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3144                                         netif_stop_queue(dev);
3145                                         schedule_work(&nic->rst_timer_task);
3146                                         nic->mac_control.stats_info->sw_stat.
3147                                                         soft_reset_cnt++;
3148                                 }
3149                         }
3150                 } else {
3151                         nic->mac_control.stats_info->sw_stat.
3152                                 single_ecc_errs++;
3153                 }
3154         }
3155
3156         /* In case of a serious error, the device will be Reset. */
3157         val64 = readq(&bar0->serr_source);
3158         if (val64 & SERR_SOURCE_ANY) {
3159                 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3160                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3161                 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3162                           (unsigned long long)val64);
3163                 netif_stop_queue(dev);
3164                 schedule_work(&nic->rst_timer_task);
3165                 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3166         }
3167
3168         /*
3169          * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3170          * Error occurs, the adapter will be recycled by disabling the
3171          * adapter enable bit and enabling it again after the device
3172          * becomes Quiescent.
3173          */
3174         val64 = readq(&bar0->pcc_err_reg);
3175         writeq(val64, &bar0->pcc_err_reg);
3176         if (val64 & PCC_FB_ECC_DB_ERR) {
3177                 u64 ac = readq(&bar0->adapter_control);
3178                 ac &= ~(ADAPTER_CNTL_EN);
3179                 writeq(ac, &bar0->adapter_control);
3180                 ac = readq(&bar0->adapter_control);
3181                 schedule_work(&nic->set_link_task);
3182         }
3183         /* Check for data parity error */
3184         val64 = readq(&bar0->pic_int_status);
3185         if (val64 & PIC_INT_GPIO) {
3186                 val64 = readq(&bar0->gpio_int_reg);
3187                 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3188                         nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3189                         schedule_work(&nic->rst_timer_task);
3190                         nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3191                 }
3192         }
3193
3194         /* Check for ring full counter */
3195         if (nic->device_type & XFRAME_II_DEVICE) {
3196                 val64 = readq(&bar0->ring_bump_counter1);
3197                 for (i=0; i<4; i++) {
3198                         cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3199                         cnt >>= 64 - ((i+1)*16);
3200                         nic->mac_control.stats_info->sw_stat.ring_full_cnt
3201                                 += cnt;
3202                 }
3203
3204                 val64 = readq(&bar0->ring_bump_counter2);
3205                 for (i=0; i<4; i++) {
3206                         cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3207                         cnt >>= 64 - ((i+1)*16);
3208                         nic->mac_control.stats_info->sw_stat.ring_full_cnt
3209                                 += cnt;
3210                 }
3211         }
3212
3213         /* Other type of interrupts are not being handled now,  TODO */
3214 }
3215
3216 /**
3217  *  wait_for_cmd_complete - waits for a command to complete.
3218  *  @sp : private member of the device structure, which is a pointer to the
3219  *  s2io_nic structure.
3220  *  Description: Function that waits for a command to Write into RMAC
3221  *  ADDR DATA registers to be completed and returns either success or
3222  *  error depending on whether the command was complete or not.
3223  *  Return value:
3224  *   SUCCESS on success and FAILURE on failure.
3225  */
3226
3227 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3228                                 int bit_state)
3229 {
3230         int ret = FAILURE, cnt = 0, delay = 1;
3231         u64 val64;
3232
3233         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3234                 return FAILURE;
3235
3236         do {
3237                 val64 = readq(addr);
3238                 if (bit_state == S2IO_BIT_RESET) {
3239                         if (!(val64 & busy_bit)) {
3240                                 ret = SUCCESS;
3241                                 break;
3242                         }
3243                 } else {
3244                         if (!(val64 & busy_bit)) {
3245                                 ret = SUCCESS;
3246                                 break;
3247                         }
3248                 }
3249
3250                 if(in_interrupt())
3251                         mdelay(delay);
3252                 else
3253                         msleep(delay);
3254
3255                 if (++cnt >= 10)
3256                         delay = 50;
3257         } while (cnt < 20);
3258         return ret;
3259 }
3260 /*
3261  * check_pci_device_id - Checks if the device id is supported
3262  * @id : device id
3263  * Description: Function to check if the pci device id is supported by driver.
3264  * Return value: Actual device id if supported else PCI_ANY_ID
3265  */
3266 static u16 check_pci_device_id(u16 id)
3267 {
3268         switch (id) {
3269         case PCI_DEVICE_ID_HERC_WIN:
3270         case PCI_DEVICE_ID_HERC_UNI:
3271                 return XFRAME_II_DEVICE;
3272         case PCI_DEVICE_ID_S2IO_UNI:
3273         case PCI_DEVICE_ID_S2IO_WIN:
3274                 return XFRAME_I_DEVICE;
3275         default:
3276                 return PCI_ANY_ID;
3277         }
3278 }
3279
3280 /**
3281  *  s2io_reset - Resets the card.
3282  *  @sp : private member of the device structure.
3283  *  Description: Function to Reset the card. This function then also
3284  *  restores the previously saved PCI configuration space registers as
3285  *  the card reset also resets the configuration space.
3286  *  Return value:
3287  *  void.
3288  */
3289
3290 static void s2io_reset(struct s2io_nic * sp)
3291 {
3292         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3293         u64 val64;
3294         u16 subid, pci_cmd;
3295         int i;
3296         u16 val16;
3297         DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3298                         __FUNCTION__, sp->dev->name);
3299
3300         /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3301         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3302
3303         if (sp->device_type == XFRAME_II_DEVICE) {
3304                 int ret;
3305                 ret = pci_set_power_state(sp->pdev, 3);
3306                 if (!ret)
3307                         ret = pci_set_power_state(sp->pdev, 0);
3308                 else {
3309                         DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3310                                         __FUNCTION__);
3311                         goto old_way;
3312                 }
3313                 msleep(20);
3314                 goto new_way;
3315         }
3316 old_way:
3317         val64 = SW_RESET_ALL;
3318         writeq(val64, &bar0->sw_reset);
3319 new_way:
3320         if (strstr(sp->product_name, "CX4")) {
3321                 msleep(750);
3322         }
3323         msleep(250);
3324         for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3325
3326                 /* Restore the PCI state saved during initialization. */
3327                 pci_restore_state(sp->pdev);
3328                 pci_read_config_word(sp->pdev, 0x2, &val16);
3329                 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3330                         break;
3331                 msleep(200);
3332         }
3333
3334         if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3335                 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3336         }
3337
3338         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3339
3340         s2io_init_pci(sp);
3341
3342         /* Set swapper to enable I/O register access */
3343         s2io_set_swapper(sp);
3344
3345         /* Restore the MSIX table entries from local variables */
3346         restore_xmsi_data(sp);
3347
3348         /* Clear certain PCI/PCI-X fields after reset */
3349         if (sp->device_type == XFRAME_II_DEVICE) {
3350                 /* Clear "detected parity error" bit */
3351                 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3352
3353                 /* Clearing PCIX Ecc status register */
3354                 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3355
3356                 /* Clearing PCI_STATUS error reflected here */
3357                 writeq(BIT(62), &bar0->txpic_int_reg);
3358         }
3359
3360         /* Reset device statistics maintained by OS */
3361         memset(&sp->stats, 0, sizeof (struct net_device_stats));
3362
3363         /* SXE-002: Configure link and activity LED to turn it off */
3364         subid = sp->pdev->subsystem_device;
3365         if (((subid & 0xFF) >= 0x07) &&
3366             (sp->device_type == XFRAME_I_DEVICE)) {
3367                 val64 = readq(&bar0->gpio_control);
3368                 val64 |= 0x0000800000000000ULL;
3369                 writeq(val64, &bar0->gpio_control);
3370                 val64 = 0x0411040400000000ULL;
3371                 writeq(val64, (void __iomem *)bar0 + 0x2700);
3372         }
3373
3374         /*
3375          * Clear spurious ECC interrupts that would have occured on
3376          * XFRAME II cards after reset.
3377          */
3378         if (sp->device_type == XFRAME_II_DEVICE) {
3379                 val64 = readq(&bar0->pcc_err_reg);
3380                 writeq(val64, &bar0->pcc_err_reg);
3381         }
3382
3383         sp->device_enabled_once = FALSE;
3384 }
3385
3386 /**
3387  *  s2io_set_swapper - to set the swapper controle on the card
3388  *  @sp : private member of the device structure,
3389  *  pointer to the s2io_nic structure.
3390  *  Description: Function to set the swapper control on the card
3391  *  correctly depending on the 'endianness' of the system.
3392  *  Return value:
3393  *  SUCCESS on success and FAILURE on failure.
3394  */
3395
3396 static int s2io_set_swapper(struct s2io_nic * sp)
3397 {
3398         struct net_device *dev = sp->dev;
3399         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3400         u64 val64, valt, valr;
3401
3402         /*
3403          * Set proper endian settings and verify the same by reading
3404          * the PIF Feed-back register.
3405          */
3406
3407         val64 = readq(&bar0->pif_rd_swapper_fb);
3408         if (val64 != 0x0123456789ABCDEFULL) {
3409                 int i = 0;
3410                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3411                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
3412                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
3413                                 0};                     /* FE=0, SE=0 */
3414
3415                 while(i<4) {
3416                         writeq(value[i], &bar0->swapper_ctrl);
3417                         val64 = readq(&bar0->pif_rd_swapper_fb);
3418                         if (val64 == 0x0123456789ABCDEFULL)
3419                                 break;
3420                         i++;
3421                 }
3422                 if (i == 4) {
3423                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3424                                 dev->name);
3425                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3426                                 (unsigned long long) val64);
3427                         return FAILURE;
3428                 }
3429                 valr = value[i];
3430         } else {
3431                 valr = readq(&bar0->swapper_ctrl);
3432         }
3433
3434         valt = 0x0123456789ABCDEFULL;
3435         writeq(valt, &bar0->xmsi_address);
3436         val64 = readq(&bar0->xmsi_address);
3437
3438         if(val64 != valt) {
3439                 int i = 0;
3440                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3441                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
3442                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
3443                                 0};                     /* FE=0, SE=0 */
3444
3445                 while(i<4) {
3446                         writeq((value[i] | valr), &bar0->swapper_ctrl);
3447                         writeq(valt, &bar0->xmsi_address);
3448                         val64 = readq(&bar0->xmsi_address);
3449                         if(val64 == valt)
3450                                 break;
3451                         i++;
3452                 }
3453                 if(i == 4) {
3454                         unsigned long long x = val64;
3455                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3456                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3457                         return FAILURE;
3458                 }
3459         }
3460         val64 = readq(&bar0->swapper_ctrl);
3461         val64 &= 0xFFFF000000000000ULL;
3462
3463 #ifdef  __BIG_ENDIAN
3464         /*
3465          * The device by default set to a big endian format, so a
3466          * big endian driver need not set anything.
3467          */
3468         val64 |= (SWAPPER_CTRL_TXP_FE |
3469                  SWAPPER_CTRL_TXP_SE |
3470                  SWAPPER_CTRL_TXD_R_FE |
3471                  SWAPPER_CTRL_TXD_W_FE |
3472                  SWAPPER_CTRL_TXF_R_FE |
3473                  SWAPPER_CTRL_RXD_R_FE |
3474                  SWAPPER_CTRL_RXD_W_FE |
3475                  SWAPPER_CTRL_RXF_W_FE |
3476                  SWAPPER_CTRL_XMSI_FE |
3477                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3478         if (sp->intr_type == INTA)
3479                 val64 |= SWAPPER_CTRL_XMSI_SE;
3480         writeq(val64, &bar0->swapper_ctrl);
3481 #else
3482         /*
3483          * Initially we enable all bits to make it accessible by the
3484          * driver, then we selectively enable only those bits that
3485          * we want to set.
3486          */
3487         val64 |= (SWAPPER_CTRL_TXP_FE |
3488                  SWAPPER_CTRL_TXP_SE |
3489                  SWAPPER_CTRL_TXD_R_FE |
3490                  SWAPPER_CTRL_TXD_R_SE |
3491                  SWAPPER_CTRL_TXD_W_FE |
3492                  SWAPPER_CTRL_TXD_W_SE |
3493                  SWAPPER_CTRL_TXF_R_FE |
3494                  SWAPPER_CTRL_RXD_R_FE |
3495                  SWAPPER_CTRL_RXD_R_SE |
3496                  SWAPPER_CTRL_RXD_W_FE |
3497                  SWAPPER_CTRL_RXD_W_SE |
3498                  SWAPPER_CTRL_RXF_W_FE |
3499                  SWAPPER_CTRL_XMSI_FE |
3500                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3501         if (sp->intr_type == INTA)
3502                 val64 |= SWAPPER_CTRL_XMSI_SE;
3503         writeq(val64, &bar0->swapper_ctrl);
3504 #endif
3505         val64 = readq(&bar0->swapper_ctrl);
3506
3507         /*
3508          * Verifying if endian settings are accurate by reading a
3509          * feedback register.
3510          */
3511         val64 = readq(&bar0->pif_rd_swapper_fb);
3512         if (val64 != 0x0123456789ABCDEFULL) {
3513                 /* Endian settings are incorrect, calls for another dekko. */
3514                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3515                           dev->name);
3516                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3517                           (unsigned long long) val64);
3518                 return FAILURE;
3519         }
3520
3521         return SUCCESS;
3522 }
3523
3524 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3525 {
3526         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3527         u64 val64;
3528         int ret = 0, cnt = 0;
3529
3530         do {
3531                 val64 = readq(&bar0->xmsi_access);
3532                 if (!(val64 & BIT(15)))
3533                         break;
3534                 mdelay(1);
3535                 cnt++;
3536         } while(cnt < 5);
3537         if (cnt == 5) {
3538                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3539                 ret = 1;
3540         }
3541
3542         return ret;
3543 }
3544
3545 static void restore_xmsi_data(struct s2io_nic *nic)
3546 {
3547         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3548         u64 val64;
3549         int i;
3550
3551         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3552                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3553                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3554                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3555                 writeq(val64, &bar0->xmsi_access);
3556                 if (wait_for_msix_trans(nic, i)) {
3557                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3558                         continue;
3559                 }
3560         }
3561 }
3562
3563 static void store_xmsi_data(struct s2io_nic *nic)
3564 {
3565         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3566         u64 val64, addr, data;
3567         int i;
3568
3569         /* Store and display */
3570         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3571                 val64 = (BIT(15) | vBIT(i, 26, 6));
3572                 writeq(val64, &bar0->xmsi_access);
3573                 if (wait_for_msix_trans(nic, i)) {
3574                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3575                         continue;
3576                 }
3577                 addr = readq(&bar0->xmsi_address);
3578                 data = readq(&bar0->xmsi_data);
3579                 if (addr && data) {
3580                         nic->msix_info[i].addr = addr;
3581                         nic->msix_info[i].data = data;
3582                 }
3583         }
3584 }
3585
/* Enable plain MSI interrupts and steer all Tx fifos and Rx rings to
 * MSI vector 1. Returns 0 on success or the pci_enable_msi() error. */
int s2io_enable_msi(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u16 msi_ctrl, msg_val;
	struct config_param *config = &nic->config;
	struct net_device *dev = nic->dev;
	u64 val64, tx_mat, rx_mat;
	int i, err;

	/* Clear BIT(1) of pic_control - presumably switches the device out
	 * of INTA mode; confirm against the Xframe register manual. */
	val64 = readq(&bar0->pic_control);
	val64 &= ~BIT(1);
	writeq(val64, &bar0->pic_control);

	err = pci_enable_msi(nic->pdev);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
			  nic->dev->name);
		return err;
	}

	/*
	 * Enable MSI and use MSI-1 in stead of the standard MSI-0
	 * for interrupt handling.
	 */
	/* NOTE(review): 0x4c and 0x42 are raw offsets into this device's
	 * MSI capability block in PCI config space. */
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
	msg_val ^= 0x1;
	pci_write_config_word(nic->pdev, 0x4c, msg_val);
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);

	pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
	msi_ctrl |= 0x10;
	pci_write_config_word(nic->pdev, 0x42, msi_ctrl);

	/* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i=0; i<config->tx_fifo_num; i++) {
		tx_mat |= TX_MAT_SET(i, 1);
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	rx_mat = readq(&bar0->rx_mat);
	for (i=0; i<config->rx_ring_num; i++) {
		rx_mat |= RX_MAT_SET(i, 1);
	}
	writeq(rx_mat, &bar0->rx_mat);

	dev->irq = nic->pdev->irq;
	return 0;
}
3635
3636 static int s2io_enable_msi_x(struct s2io_nic *nic)
3637 {
3638         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3639         u64 tx_mat, rx_mat;
3640         u16 msi_control; /* Temp variable */
3641         int ret, i, j, msix_indx = 1;
3642
3643         nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3644                                GFP_KERNEL);
3645         if (nic->entries == NULL) {
3646                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3647                 return -ENOMEM;
3648         }
3649         memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3650
3651         nic->s2io_entries =
3652                 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3653                                    GFP_KERNEL);
3654         if (nic->s2io_entries == NULL) {
3655                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3656                 kfree(nic->entries);
3657                 return -ENOMEM;
3658         }
3659         memset(nic->s2io_entries, 0,
3660                MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3661
3662         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3663                 nic->entries[i].entry = i;
3664                 nic->s2io_entries[i].entry = i;
3665                 nic->s2io_entries[i].arg = NULL;
3666                 nic->s2io_entries[i].in_use = 0;
3667         }
3668
3669         tx_mat = readq(&bar0->tx_mat0_n[0]);
3670         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3671                 tx_mat |= TX_MAT_SET(i, msix_indx);
3672                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3673                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3674                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3675         }
3676         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3677
3678         if (!nic->config.bimodal) {
3679                 rx_mat = readq(&bar0->rx_mat);
3680                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3681                         rx_mat |= RX_MAT_SET(j, msix_indx);
3682                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3683                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3684                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3685                 }
3686                 writeq(rx_mat, &bar0->rx_mat);
3687         } else {
3688                 tx_mat = readq(&bar0->tx_mat0_n[7]);
3689                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3690                         tx_mat |= TX_MAT_SET(i, msix_indx);
3691                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3692                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3693                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3694                 }
3695                 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3696         }
3697
3698         nic->avail_msix_vectors = 0;
3699         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3700         /* We fail init if error or we get less vectors than min required */
3701         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3702                 nic->avail_msix_vectors = ret;
3703                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3704         }
3705         if (ret) {
3706                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3707                 kfree(nic->entries);
3708                 kfree(nic->s2io_entries);
3709                 nic->entries = NULL;
3710                 nic->s2io_entries = NULL;
3711                 nic->avail_msix_vectors = 0;
3712                 return -ENOMEM;
3713         }
3714         if (!nic->avail_msix_vectors)
3715                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3716
3717         /*
3718          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3719          * in the herc NIC. (Temp change, needs to be removed later)
3720          */
3721         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3722         msi_control |= 0x1; /* Enable MSI */
3723         pci_write_config_word(nic->pdev, 0x42, msi_control);
3724
3725         return 0;
3726 }
3727
3728 /* ********************************************************* *
3729  * Functions defined below concern the OS part of the driver *
3730  * ********************************************************* */
3731
3732 /**
3733  *  s2io_open - open entry point of the driver
3734  *  @dev : pointer to the device structure.
3735  *  Description:
3736  *  This function is the open entry point of the driver. It mainly calls a
3737  *  function to allocate Rx buffers and inserts them into the buffer
3738  *  descriptors and then enables the Rx part of the NIC.
3739  *  Return value:
3740  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3741  *   file on failure.
3742  */
3743
3744 static int s2io_open(struct net_device *dev)
3745 {
3746         struct s2io_nic *sp = dev->priv;
3747         int err = 0;
3748
3749         /*
3750          * Make sure you have link off by default every time
3751          * Nic is initialized
3752          */
3753         netif_carrier_off(dev);
3754         sp->last_link_state = 0;
3755
3756         /* Initialize H/W and enable interrupts */
3757         err = s2io_card_up(sp);
3758         if (err) {
3759                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3760                           dev->name);
3761                 goto hw_init_failed;
3762         }
3763
3764         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3765                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3766                 s2io_card_down(sp);
3767                 err = -ENODEV;
3768                 goto hw_init_failed;
3769         }
3770
3771         netif_start_queue(dev);
3772         return 0;
3773
3774 hw_init_failed:
3775         if (sp->intr_type == MSI_X) {
3776                 if (sp->entries)
3777                         kfree(sp->entries);
3778                 if (sp->s2io_entries)
3779                         kfree(sp->s2io_entries);
3780         }
3781         return err;
3782 }
3783
3784 /**
3785  *  s2io_close -close entry point of the driver
3786  *  @dev : device pointer.
3787  *  Description:
3788  *  This is the stop entry point of the driver. It needs to undo exactly
3789  *  whatever was done by the open entry point,thus it's usually referred to
3790  *  as the close function.Among other things this function mainly stops the
3791  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3792  *  Return value:
3793  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3794  *  file on failure.
3795  */
3796
3797 static int s2io_close(struct net_device *dev)
3798 {
3799         struct s2io_nic *sp = dev->priv;
3800
3801         netif_stop_queue(dev);
3802         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3803         s2io_card_down(sp);
3804
3805         sp->device_close_flag = TRUE;   /* Device is shut down. */
3806         return 0;
3807 }
3808
3809 /**
3810  *  s2io_xmit - Tx entry point of te driver
3811  *  @skb : the socket buffer containing the Tx data.
3812  *  @dev : device pointer.
3813  *  Description :
3814  *  This function is the Tx entry point of the driver. S2IO NIC supports
3815  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3816  *  NOTE: when device cant queue the pkt,just the trans_start variable will
3817  *  not be upadted.
3818  *  Return value:
3819  *  0 on success & 1 on failure.
3820  */
3821
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
	/* The whole descriptor build + doorbell runs under tx_lock */
	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Card is being reset: silently drop the packet (counts as success) */
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;

	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* VLAN priority lives in the top 3 bits of the tag */
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	/* A non-zero Host_Control means this TxD still owns an skb in flight */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* A buffer with no data will be dropped */
	if (!skb->len) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* Set TCP LSO enable + MSS in the first descriptor when GSO is on */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* Length of the linear (non-paged) part of the skb */
	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UFO: TxD0 carries an 8-byte in-band header (the IPv6
		 * fragment id); MSS is rounded down to a multiple of 8 */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		/* The real payload starts at the next descriptor */
		txdp++;
	}

	/* Map the linear part; Host_Control keeps the skb for Tx completion */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	/* Mark the last descriptor of the gather list */
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the TxD list to the NIC: list address, then the doorbell */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the doorbell MMIO write before the lock release below */
	mmiowb();

	/* Advance the put pointer, wrapping at the end of the FIFO */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}
3983
3984 static void
3985 s2io_alarm_handle(unsigned long data)
3986 {
3987         struct s2io_nic *sp = (struct s2io_nic *)data;
3988
3989         alarm_intr_handler(sp);
3990         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3991 }
3992
3993 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
3994 {
3995         int rxb_size, level;
3996
3997         if (!sp->lro) {
3998                 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3999                 level = rx_buffer_level(sp, rxb_size, rng_n);
4000
4001                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4002                         int ret;
4003                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4004                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
4005                         if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4006                                 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4007                                           __FUNCTION__);
4008                                 clear_bit(0, (&sp->tasklet_status));
4009                                 return -1;
4010                         }
4011                         clear_bit(0, (&sp->tasklet_status));
4012                 } else if (level == LOW)
4013                         tasklet_schedule(&sp->task);
4014
4015         } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4016                         DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
4017                         DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4018         }
4019         return 0;
4020 }
4021
4022 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4023 {
4024         struct net_device *dev = (struct net_device *) dev_id;
4025         struct s2io_nic *sp = dev->priv;
4026         int i;
4027         struct mac_info *mac_control;
4028         struct config_param *config;
4029
4030         atomic_inc(&sp->isr_cnt);
4031         mac_control = &sp->mac_control;
4032         config = &sp->config;
4033         DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4034
4035         /* If Intr is because of Rx Traffic */
4036         for (i = 0; i < config->rx_ring_num; i++)
4037                 rx_intr_handler(&mac_control->rings[i]);
4038
4039         /* If Intr is because of Tx Traffic */
4040         for (i = 0; i < config->tx_fifo_num; i++)
4041                 tx_intr_handler(&mac_control->fifos[i]);
4042
4043         /*
4044          * If the Rx buffer count is below the panic threshold then
4045          * reallocate the buffers from the interrupt handler itself,
4046          * else schedule a tasklet to reallocate the buffers.
4047          */
4048         for (i = 0; i < config->rx_ring_num; i++)
4049                 s2io_chk_rx_buffers(sp, i);
4050
4051         atomic_dec(&sp->isr_cnt);
4052         return IRQ_HANDLED;
4053 }
4054
4055 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4056 {
4057         struct ring_info *ring = (struct ring_info *)dev_id;
4058         struct s2io_nic *sp = ring->nic;
4059
4060         atomic_inc(&sp->isr_cnt);
4061
4062         rx_intr_handler(ring);
4063         s2io_chk_rx_buffers(sp, ring->ring_no);
4064
4065         atomic_dec(&sp->isr_cnt);
4066         return IRQ_HANDLED;
4067 }
4068
4069 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4070 {
4071         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4072         struct s2io_nic *sp = fifo->nic;
4073
4074         atomic_inc(&sp->isr_cnt);
4075         tx_intr_handler(fifo);
4076         atomic_dec(&sp->isr_cnt);
4077         return IRQ_HANDLED;
4078 }
/* Handle TxPIC interrupts: decode GPIO link-state change events, update
 * the link state, adjust the LED/adapter enable bits and re-program the
 * GPIO interrupt masks so only the opposite link transition is unmasked. */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			/* Unmask both transitions so the next event decides */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): adapter_status read result is unused
			 * and immediately overwritten — possibly an ack/flush
			 * read; confirm against the Xframe register spec. */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			/* NOTE(review): result of this read also discarded —
			 * see note above; confirm whether it can be removed. */
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmaks link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): trailing read with value discarded — presumably a
	 * PCI write flush for the mask updates above; confirm. */
	val64 = readq(&bar0->gpio_int_mask);
}
4139
4140 /**
4141  *  s2io_isr - ISR handler of the device .
4142  *  @irq: the irq of the device.
4143  *  @dev_id: a void pointer to the dev structure of the NIC.
4144  *  Description:  This function is the ISR handler of the device. It
4145  *  identifies the reason for the interrupt and calls the relevant
4146  *  service routines. As a contongency measure, this ISR allocates the
4147  *  recv buffers, if their numbers are below the panic value which is
4148  *  presently set to 25% of the original number of rcv buffers allocated.
4149  *  Return value:
4150  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4151  *   IRQ_NONE: will be returned if interrupt is not from our device
4152  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by us. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}
	else if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* All 1's: register read returned garbage, likely because the
		 * device is gone/disabled — get out without touching it. */
		/* Disable device and get out */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

	if (napi) {
		/* NAPI: hand Rx work to the poll routine; mask further Rx
		 * interrupts only if the schedule was accepted. */
		if (reason & GEN_INTR_RXTRAFFIC) {
			if ( likely ( netif_rx_schedule_prep(dev)) ) {
				__netif_rx_schedule(dev);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
			}
			else
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
		}
	} else {
		/*
		 * Rx handler is called by default, without checking for the
		 * cause of interrupt.
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_RXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}

	/*
	 * tx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit get's
	 * cleared and hence a read can be avoided.
	 */
	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	if (!napi) {
		for (i = 0; i < config->rx_ring_num; i++)
			s2io_chk_rx_buffers(sp, i);
	}

	/* Write 0 to the mask (presumably unmasking all interrupts) and
	 * read back to flush the posted write — NOTE(review): confirm the
	 * mask polarity against the Xframe register spec. */
	writeq(0, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
4242
4243 /**
4244  * s2io_updt_stats -
4245  */
4246 static void s2io_updt_stats(struct s2io_nic *sp)
4247 {
4248         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4249         u64 val64;
4250         int cnt = 0;
4251
4252         if (atomic_read(&sp->card_state) == CARD_UP) {
4253                 /* Apprx 30us on a 133 MHz bus */
4254                 val64 = SET_UPDT_CLICKS(10) |
4255                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4256                 writeq(val64, &bar0->stat_cfg);
4257                 do {
4258                         udelay(100);
4259                         val64 = readq(&bar0->stat_cfg);
4260                         if (!(val64 & BIT(0)))
4261                                 break;
4262                         cnt++;
4263                         if (cnt == 5)
4264                                 break; /* Updt failed */
4265                 } while(1);
4266         } else {
4267                 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
4268         }
4269 }
4270
4271 /**
4272  *  s2io_get_stats - Updates the device statistics structure.
4273  *  @dev : pointer to the device structure.
4274  *  Description:
4275  *  This function updates the device statistics structure in the s2io_nic
4276  *  structure and returns a pointer to the same.
4277  *  Return value:
4278  *  pointer to the updated net_device_stats structure.
4279  */
4280
4281 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4282 {
4283         struct s2io_nic *sp = dev->priv;
4284         struct mac_info *mac_control;
4285         struct config_param *config;
4286
4287
4288         mac_control = &sp->mac_control;
4289         config = &sp->config;
4290
4291         /* Configure Stats for immediate updt */
4292         s2io_updt_stats(sp);
4293
4294         sp->stats.tx_packets =
4295                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4296         sp->stats.tx_errors =
4297                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4298         sp->stats.rx_errors =
4299                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4300         sp->stats.multicast =
4301                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4302         sp->stats.rx_length_errors =
4303                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4304
4305         return (&sp->stats);
4306 }
4307
4308 /**
4309  *  s2io_set_multicast - entry point for multicast address enable/disable.
4310  *  @dev : pointer to the device structure
4311  *  Description:
4312  *  This function is a driver entry point which gets called by the kernel
4313  *  whenever multicast addresses must be enabled/disabled. This also gets
4314  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4315  *  determine, if multicast address must be enabled or if promiscuous mode
4316  *  is to be disabled etc.
4317  *  Return value:
4318  *  void.
4319  */
4320
4321 static void s2io_set_multicast(struct net_device *dev)
4322 {
4323         int i, j, prev_cnt;
4324         struct dev_mc_list *mclist;
4325         struct s2io_nic *sp = dev->priv;
4326         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4327         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4328             0xfeffffffffffULL;
4329         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4330         void __iomem *add;
4331
4332         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4333                 /*  Enable all Multicast addresses */
4334                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4335                        &bar0->rmac_addr_data0_mem);
4336                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4337                        &bar0->rmac_addr_data1_mem);
4338                 val64 = RMAC_ADDR_CMD_MEM_WE |
4339                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4340                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4341                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4342                 /* Wait till command completes */
4343                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4344                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4345                                         S2IO_BIT_RESET);
4346
4347                 sp->m_cast_flg = 1;
4348                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4349         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4350                 /*  Disable all Multicast addresses */
4351                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4352                        &bar0->rmac_addr_data0_mem);
4353                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4354                        &bar0->rmac_addr_data1_mem);
4355                 val64 = RMAC_ADDR_CMD_MEM_WE |
4356                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4357                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4358                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4359                 /* Wait till command completes */
4360                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4361                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4362                                         S2IO_BIT_RESET);
4363
4364                 sp->m_cast_flg = 0;
4365                 sp->all_multi_pos = 0;
4366         }
4367
4368         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4369                 /*  Put the NIC into promiscuous mode */
4370                 add = &bar0->mac_cfg;
4371                 val64 = readq(&bar0->mac_cfg);
4372                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4373
4374                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4375                 writel((u32) val64, add);
4376                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4377                 writel((u32) (val64 >> 32), (add + 4));
4378
4379                 if (vlan_tag_strip != 1) {
4380                         val64 = readq(&bar0->rx_pa_cfg);
4381                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4382                         writeq(val64, &bar0->rx_pa_cfg);
4383                         vlan_strip_flag = 0;
4384                 }
4385
4386                 val64 = readq(&bar0->mac_cfg);
4387                 sp->promisc_flg = 1;
4388                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4389                           dev->name);
4390         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4391                 /*  Remove the NIC from promiscuous mode */
4392                 add = &bar0->mac_cfg;
4393                 val64 = readq(&bar0->mac_cfg);
4394                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4395
4396                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4397                 writel((u32) val64, add);
4398                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4399                 writel((u32) (val64 >> 32), (add + 4));
4400
4401                 if (vlan_tag_strip != 0) {
4402                         val64 = readq(&bar0->rx_pa_cfg);
4403                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4404                         writeq(val64, &bar0->rx_pa_cfg);
4405                         vlan_strip_flag = 1;
4406                 }
4407
4408                 val64 = readq(&bar0->mac_cfg);
4409                 sp->promisc_flg = 0;
4410                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4411                           dev->name);
4412         }
4413
4414         /*  Update individual M_CAST address list */
4415         if ((!sp->m_cast_flg) && dev->mc_count) {
4416                 if (dev->mc_count >
4417                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4418                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4419                                   dev->name);
4420                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
4421                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4422                         return;
4423                 }
4424
4425                 prev_cnt = sp->mc_addr_count;
4426                 sp->mc_addr_count = dev->mc_count;
4427
4428                 /* Clear out the previous list of Mc in the H/W. */
4429                 for (i = 0; i < prev_cnt; i++) {
4430                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4431                                &bar0->rmac_addr_data0_mem);
4432                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4433                                 &bar0->rmac_addr_data1_mem);
4434                         val64 = RMAC_ADDR_CMD_MEM_WE |
4435                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4436                             RMAC_ADDR_CMD_MEM_OFFSET
4437                             (MAC_MC_ADDR_START_OFFSET + i);
4438                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4439
4440                         /* Wait for command completes */
4441                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4442                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4443                                         S2IO_BIT_RESET)) {
4444                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4445                                           dev->name);
4446                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4447                                 return;
4448                         }
4449                 }
4450
4451                 /* Create the new Rx filter list and update the same in H/W. */
4452                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4453                      i++, mclist = mclist->next) {
4454                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4455                                ETH_ALEN);
4456                         mac_addr = 0;
4457                         for (j = 0; j < ETH_ALEN; j++) {
4458                                 mac_addr |= mclist->dmi_addr[j];
4459                                 mac_addr <<= 8;
4460                         }
4461                         mac_addr >>= 8;
4462                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4463                                &bar0->rmac_addr_data0_mem);
4464                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4465                                 &bar0->rmac_addr_data1_mem);
4466                         val64 = RMAC_ADDR_CMD_MEM_WE |
4467                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4468                             RMAC_ADDR_CMD_MEM_OFFSET
4469                             (i + MAC_MC_ADDR_START_OFFSET);
4470                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4471
4472                         /* Wait for command completes */
4473                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4474                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4475                                         S2IO_BIT_RESET)) {
4476                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4477                                           dev->name);
4478                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4479                                 return;
4480                         }
4481                 }
4482         }
4483 }
4484
4485 /**
4486  *  s2io_set_mac_addr - Programs the Xframe mac address
4487  *  @dev : pointer to the device structure.
4488  *  @addr: a uchar pointer to the new mac address which is to be set.
4489  *  Description : This procedure will program the Xframe to receive
4490  *  frames with new Mac Address
4491  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4492  *  as defined in errno.h file on failure.
4493  */
4494
4495 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4496 {
4497         struct s2io_nic *sp = dev->priv;
4498         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4499         register u64 val64, mac_addr = 0;
4500         int i;
4501
4502         /*
4503          * Set the new MAC address as the new unicast filter and reflect this
4504          * change on the device address registered with the OS. It will be
4505          * at offset 0.
4506          */
4507         for (i = 0; i < ETH_ALEN; i++) {
4508                 mac_addr <<= 8;
4509                 mac_addr |= addr[i];
4510         }
4511
4512         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4513                &bar0->rmac_addr_data0_mem);
4514
4515         val64 =
4516             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4517             RMAC_ADDR_CMD_MEM_OFFSET(0);
4518         writeq(val64, &bar0->rmac_addr_cmd_mem);
4519         /* Wait till command completes */
4520         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4521                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4522                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4523                 return FAILURE;
4524         }
4525
4526         return SUCCESS;
4527 }
4528
4529 /**
4530  * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
4532  * @info: pointer to the structure with parameters given by ethtool to set
4533  * link information.
4534  * Description:
4535  * The function sets different link parameters provided by the user onto
4536  * the NIC.
4537  * Return value:
4538  * 0 on success.
4539 */
4540
4541 static int s2io_ethtool_sset(struct net_device *dev,
4542                              struct ethtool_cmd *info)
4543 {
4544         struct s2io_nic *sp = dev->priv;
4545         if ((info->autoneg == AUTONEG_ENABLE) ||
4546             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4547                 return -EINVAL;
4548         else {
4549                 s2io_close(sp->dev);
4550                 s2io_open(sp->dev);
4551         }
4552
4553         return 0;
4554 }
4555
4556 /**
 * s2io_ethtool_gset - Return link specific information.
4558  * @sp : private member of the device structure, pointer to the
4559  *      s2io_nic structure.
4560  * @info : pointer to the structure with parameters given by ethtool
4561  * to return link information.
4562  * Description:
4563  * Returns link specific information like speed, duplex etc.. to ethtool.
4564  * Return value :
4565  * return 0 on success.
4566  */
4567
4568 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4569 {
4570         struct s2io_nic *sp = dev->priv;
4571         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4572         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4573         info->port = PORT_FIBRE;
4574         /* info->transceiver?? TODO */
4575
4576         if (netif_carrier_ok(sp->dev)) {
4577                 info->speed = 10000;
4578                 info->duplex = DUPLEX_FULL;
4579         } else {
4580                 info->speed = -1;
4581                 info->duplex = -1;
4582         }
4583
4584         info->autoneg = AUTONEG_DISABLE;
4585         return 0;
4586 }
4587
4588 /**
4589  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4590  * @sp : private member of the device structure, which is a pointer to the
4591  * s2io_nic structure.
4592  * @info : pointer to the structure with parameters given by ethtool to
4593  * return driver information.
4594  * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
4596  * Return value:
4597  *  void
4598  */
4599
4600 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4601                                   struct ethtool_drvinfo *info)
4602 {
4603         struct s2io_nic *sp = dev->priv;
4604
4605         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4606         strncpy(info->version, s2io_driver_version, sizeof(info->version));
4607         strncpy(info->fw_version, "", sizeof(info->fw_version));
4608         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4609         info->regdump_len = XENA_REG_SPACE;
4610         info->eedump_len = XENA_EEPROM_SPACE;
4611         info->testinfo_len = S2IO_TEST_LEN;
4612         info->n_stats = S2IO_STAT_LEN;
4613 }
4614
4615 /**
 *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
4617  *  @sp: private member of the device structure, which is a pointer to the
4618  *  s2io_nic structure.
4619  *  @regs : pointer to the structure with parameters given by ethtool for
4620  *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
4622  *  Description:
4623  *  Dumps the entire register space of xFrame NIC into the user given
4624  *  buffer area.
4625  * Return value :
4626  * void .
4627 */
4628
4629 static void s2io_ethtool_gregs(struct net_device *dev,
4630                                struct ethtool_regs *regs, void *space)
4631 {
4632         int i;
4633         u64 reg;
4634         u8 *reg_space = (u8 *) space;
4635         struct s2io_nic *sp = dev->priv;
4636
4637         regs->len = XENA_REG_SPACE;
4638         regs->version = sp->pdev->subsystem_device;
4639
4640         for (i = 0; i < regs->len; i += 8) {
4641                 reg = readq(sp->bar0 + i);
4642                 memcpy((reg_space + i), &reg, 8);
4643         }
4644 }
4645
4646 /**
4647  *  s2io_phy_id  - timer function that alternates adapter LED.
4648  *  @data : address of the private member of the device structure, which
4649  *  is a pointer to the s2io_nic structure, provided as an u32.
4650  * Description: This is actually the timer function that alternates the
4651  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4653  *  once every second.
4654 */
static void s2io_phy_id(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *) data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/*
	 * Xframe II, and Xframe I boards with subsystem revision >= 0x07,
	 * drive the identify LED through GPIO 0; older boards use the
	 * LED bit in adapter_control instead.
	 */
	if ((sp->device_type == XFRAME_II_DEVICE) ||
		   ((subid & 0xFF) >= 0x07)) {
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;	/* toggle LED state */
		writeq(val64, &bar0->gpio_control);
	} else {
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;	/* toggle LED state */
		writeq(val64, &bar0->adapter_control);
	}

	/* Re-arm so the LED flips again in half a second. */
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
4676
4677 /**
4678  * s2io_ethtool_idnic - To physically identify the nic on the system.
4679  * @sp : private member of the device structure, which is a pointer to the
4680  * s2io_nic structure.
4681  * @id : pointer to the structure with identification parameters given by
4682  * ethtool.
4683  * Description: Used to physically identify the NIC on the system.
4684  * The Link LED will blink for a time specified by the user for
4685  * identification.
4686  * NOTE: The Link has to be Up to be able to blink the LED. Hence
4687  * identification is possible only if it's link is up.
4688  * Return value:
4689  * int , returns 0 on success
4690  */
4691
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Remember the GPIO state so it can be restored afterwards. */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/*
	 * Early Xframe I boards blink via adapter_control, which only
	 * works while the adapter is enabled (link up).
	 */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily initialise the blink timer on first use. */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Blink for the user-requested number of seconds (0 = default). */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the original GPIO/LED state where the indicator is
	 * known to be unreliable otherwise.
	 */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
4729
4730 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4732  * @sp : private member of the device structure, which is a pointer to the
4733  *      s2io_nic structure.
4734  * @ep : pointer to the structure with pause parameters given by ethtool.
4735  * Description:
4736  * Returns the Pause frame generation and reception capability of the NIC.
4737  * Return value:
4738  *  void
4739  */
4740 static void s2io_ethtool_getpause_data(struct net_device *dev,
4741                                        struct ethtool_pauseparam *ep)
4742 {
4743         u64 val64;
4744         struct s2io_nic *sp = dev->priv;
4745         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4746
4747         val64 = readq(&bar0->rmac_pause_cfg);
4748         if (val64 & RMAC_PAUSE_GEN_ENABLE)
4749                 ep->tx_pause = TRUE;
4750         if (val64 & RMAC_PAUSE_RX_ENABLE)
4751                 ep->rx_pause = TRUE;
4752         ep->autoneg = FALSE;
4753 }
4754
4755 /**
4756  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4757  * @sp : private member of the device structure, which is a pointer to the
4758  *      s2io_nic structure.
4759  * @ep : pointer to the structure with pause parameters given by ethtool.
4760  * Description:
4761  * It can be used to set or reset Pause frame generation or reception
4762  * support of the NIC.
4763  * Return value:
4764  * int, returns 0 on Success
4765  */
4766
4767 static int s2io_ethtool_setpause_data(struct net_device *dev,
4768                                struct ethtool_pauseparam *ep)
4769 {
4770         u64 val64;
4771         struct s2io_nic *sp = dev->priv;
4772         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4773
4774         val64 = readq(&bar0->rmac_pause_cfg);
4775         if (ep->tx_pause)
4776                 val64 |= RMAC_PAUSE_GEN_ENABLE;
4777         else
4778                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4779         if (ep->rx_pause)
4780                 val64 |= RMAC_PAUSE_RX_ENABLE;
4781         else
4782                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4783         writeq(val64, &bar0->rmac_pause_cfg);
4784         return 0;
4785 }
4786
4787 /**
4788  * read_eeprom - reads 4 bytes of data from user given offset.
4789  * @sp : private member of the device structure, which is a pointer to the
4790  *      s2io_nic structure.
4791  * @off : offset at which the data must be written
4792  * @data : Its an output parameter where the data read at the given
4793  *      offset is stored.
4794  * Description:
4795  * Will read 4 bytes of data from the user given offset and return the
4796  * read data.
4797  * NOTE: Will allow to read only part of the EEPROM visible through the
4798  *   I2C bus.
4799  * Return value:
4800  *  -1 on failure and 0 on success.
4801  */
4802
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/*
		 * Xframe I: read 4 bytes through the I2C interface.
		 * Start the transaction, then poll (up to 5 x 50ms) for
		 * the END flag before extracting the data.
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/*
		 * Xframe II: read through the SPI interface instead.
		 * Program the command, raise the REQ bit, then poll for
		 * either NACK (failure) or DONE (data available).
		 */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* device rejected the read */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* only 3 bytes are valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
4853
4854 /**
4855  *  write_eeprom - actually writes the relevant part of the data value.
4856  *  @sp : private member of the device structure, which is a pointer to the
4857  *       s2io_nic structure.
4858  *  @off : offset at which the data must be written
4859  *  @data : The data that is to be written
4860  *  @cnt : Number of bytes of the data that are actually to be written into
4861  *  the Eeprom. (max of 3)
4862  * Description:
4863  *  Actually writes the relevant part of the data value into the Eeprom
4864  *  through the I2C bus.
4865  * Return value:
4866  *  0 on success, -1 on failure.
4867  */
4868
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/*
		 * Xframe I: write through the I2C interface.  Start the
		 * transaction, then poll (up to 5 x 50ms) for the END
		 * flag.
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				/* Success only if the device did not NACK. */
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/*
		 * Xframe II: write through the SPI interface.  A byte
		 * count of 8 is encoded as 0 in the BYTECNT field.
		 */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* device rejected the write */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
4918 static void s2io_vpd_read(struct s2io_nic *nic)
4919 {
4920         u8 *vpd_data;
4921         u8 data;
4922         int i=0, cnt, fail = 0;
4923         int vpd_addr = 0x80;
4924
4925         if (nic->device_type == XFRAME_II_DEVICE) {
4926                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4927                 vpd_addr = 0x80;
4928         }
4929         else {
4930                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4931                 vpd_addr = 0x50;
4932         }
4933         strcpy(nic->serial_num, "NOT AVAILABLE");
4934
4935         vpd_data = kmalloc(256, GFP_KERNEL);
4936         if (!vpd_data)
4937                 return;
4938
4939         for (i = 0; i < 256; i +=4 ) {
4940                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4941                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
4942                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
4943                 for (cnt = 0; cnt <5; cnt++) {
4944                         msleep(2);
4945                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4946                         if (data == 0x80)
4947                                 break;
4948                 }
4949                 if (cnt >= 5) {
4950                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4951                         fail = 1;
4952                         break;
4953                 }
4954                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
4955                                       (u32 *)&vpd_data[i]);
4956         }
4957
4958         if(!fail) {
4959                 /* read serial number of adapter */
4960                 for (cnt = 0; cnt < 256; cnt++) {
4961                 if ((vpd_data[cnt] == 'S') &&
4962                         (vpd_data[cnt+1] == 'N') &&
4963                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4964                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
4965                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
4966                                         vpd_data[cnt+2]);
4967                                 break;
4968                         }
4969                 }
4970         }
4971
4972         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
4973                 memset(nic->product_name, 0, vpd_data[1]);
4974                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4975         }
4976         kfree(vpd_data);
4977 }
4978
4979 /**
4980  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
4982  *  @eeprom : pointer to the user level structure provided by ethtool,
4983  *  containing all relevant information.
4984  *  @data_buf : user defined value to be written into Eeprom.
4985  *  Description: Reads the values stored in the Eeprom at given offset
4986  *  for a given length. Stores these values int the input argument data
4987  *  buffer 'data_buf' and returns these to the caller (ethtool.)
4988  *  Return value:
4989  *  int  0 on success
4990  */
4991
4992 static int s2io_ethtool_geeprom(struct net_device *dev,
4993                          struct ethtool_eeprom *eeprom, u8 * data_buf)
4994 {
4995         u32 i, valid;
4996         u64 data;
4997         struct s2io_nic *sp = dev->priv;
4998
4999         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5000
5001         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5002                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5003
5004         for (i = 0; i < eeprom->len; i += 4) {
5005                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5006                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5007                         return -EFAULT;
5008                 }
5009                 valid = INV(data);
5010                 memcpy((data_buf + i), &valid, 4);
5011         }
5012         return 0;
5013 }
5014
5015 /**
5016  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5017  *  @sp : private member of the device structure, which is a pointer to the
5018  *  s2io_nic structure.
5019  *  @eeprom : pointer to the user level structure provided by ethtool,
5020  *  containing all relevant information.
5021  *  @data_buf ; user defined value to be written into Eeprom.
5022  *  Description:
5023  *  Tries to write the user provided value in the Eeprom, at the offset
5024  *  given by the user.
5025  *  Return value:
5026  *  0 on success, -EFAULT on failure.
5027  */
5028
5029 static int s2io_ethtool_seeprom(struct net_device *dev,
5030                                 struct ethtool_eeprom *eeprom,
5031                                 u8 * data_buf)
5032 {
5033         int len = eeprom->len, cnt = 0;
5034         u64 valid = 0, data;
5035         struct s2io_nic *sp = dev->priv;
5036
5037         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5038                 DBG_PRINT(ERR_DBG,
5039                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5040                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5041                           eeprom->magic);
5042                 return -EFAULT;
5043         }
5044
5045         while (len) {
5046                 data = (u32) data_buf[cnt] & 0x000000FF;
5047                 if (data) {
5048                         valid = (u32) (data << 24);
5049                 } else
5050                         valid = data;
5051
5052                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5053                         DBG_PRINT(ERR_DBG,
5054                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5055                         DBG_PRINT(ERR_DBG,
5056                                   "write into the specified offset\n");
5057                         return -EFAULT;
5058                 }
5059                 cnt++;
5060                 len--;
5061         }
5062
5063         return 0;
5064 }
5065
5066 /**
5067  * s2io_register_test - reads and writes into all clock domains.
5068  * @sp : private member of the device structure, which is a pointer to the
5069  * s2io_nic structure.
5070  * @data : variable that returns the result of each of the test conducted b
5071  * by the driver.
5072  * Description:
5073  * Read and write into all clock domains. The NIC has 3 clock domains,
5074  * see that registers in all the three regions are accessible.
5075  * Return value:
5076  * 0 on success.
5077  */
5078
static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/*
	 * Read-only checks: each register below has a known power-on
	 * value, so a mismatch indicates an inaccessible clock domain
	 * or a byte-swap problem on the bus.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	}

	/* rx_queue_cfg resets to a different pattern per chip family. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
	}

	/*
	 * Write checks: xmsi_data is a scratch-safe register; write two
	 * complementary bit patterns and read each back to verify every
	 * data line toggles both ways.
	 */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
	}

	/* Report pass/fail both via *data and the return value. */
	*data = fail;
	return fail;
}
5132
5133 /**
5134  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5135  * @sp : private member of the device structure, which is a pointer to the
5136  * s2io_nic structure.
5137  * @data:variable that returns the result of each of the test conducted by
5138  * the driver.
5139  * Description:
5140  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5141  * register.
5142  * Return value:
5143  * 0 on success.
5144  */
5145
5146 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5147 {
5148         int fail = 0;
5149         u64 ret_data, org_4F0, org_7F0;
5150         u8 saved_4F0 = 0, saved_7F0 = 0;
5151         struct net_device *dev = sp->dev;
5152
5153         /* write_eeprom()/read_eeprom() return zero on success (note the
5154          * inverted checks for the negative tests below, which expect the
5155          * write to a protected offset to FAIL).
5156          */
5157         /* Test Write Error at offset 0 */
5158         /* Note that SPI interface allows write access to all areas
5159          * of EEPROM. Hence doing all negative testing only for Xframe I.
5160          */
5161         if (sp->device_type == XFRAME_I_DEVICE)
5162                 if (!write_eeprom(sp, 0, 0, 3))
5163                         fail = 1;
5164
5165         /* Save current values at offsets 0x4F0 and 0x7F0 so they can be
5166          * restored after the destructive write tests. */
5167         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5168                 saved_4F0 = 1;
5169         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5170                 saved_7F0 = 1;
5171
5172         /* Test Write at offset 4f0: write a pattern, read it back and
5173          * compare. */
5174         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5175                 fail = 1;
5176         if (read_eeprom(sp, 0x4F0, &ret_data))
5177                 fail = 1;
5178
5179         if (ret_data != 0x012345) {
5180                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5181                         "Data written %llx Data read %llx\n",
5182                         dev->name, (unsigned long long)0x12345,
5183                         (unsigned long long)ret_data);
5184                 fail = 1;
5185         }
5186
5187         /* Reset the EEPROM data at 0x4F0 to 0xFFFFFF (erased state) */
5188         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5189
5190         /* Test Write Request Error at offset 0x7c (protected on
5191          * Xframe I; success here means the test failed). */
5192         if (sp->device_type == XFRAME_I_DEVICE)
5193                 if (!write_eeprom(sp, 0x07C, 0, 3))
5194                         fail = 1;
5195
5196         /* Test Write Request at offset 0x7f0 */
5197         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5198                 fail = 1;
5199         if (read_eeprom(sp, 0x7F0, &ret_data))
5200                 fail = 1;
5201
5202         if (ret_data != 0x012345) {
5203                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5204                         "Data written %llx Data read %llx\n",
5205                         dev->name, (unsigned long long)0x12345,
5206                         (unsigned long long)ret_data);
5207                 fail = 1;
5208         }
5209
5210         /* Reset the EEPROM data at 0x7F0 to 0xFFFFFF (erased state) */
5211         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5212
5213         if (sp->device_type == XFRAME_I_DEVICE) {
5214                 /* Further negative tests: all of these offsets are
5215                  * write-protected on Xframe I, so a successful write
5216                  * (zero return) marks the test as failed. */
5217                 /* Test Write Error at offset 0x80 */
5218                 if (!write_eeprom(sp, 0x080, 0, 3))
5219                         fail = 1;
5220
5221                 /* Test Write Error at offset 0xfc */
5222                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5223                         fail = 1;
5224
5225                 /* Test Write Error at offset 0x100 */
5226                 if (!write_eeprom(sp, 0x100, 0, 3))
5227                         fail = 1;
5228
5229                 /* Test Write Error at offset 4ec */
5230                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5231                         fail = 1;
5232         }
5233
5234         /* Restore values at offsets 0x4F0 and 0x7F0 (only if the
5235          * original reads succeeded). */
5236         if (saved_4F0)
5237                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5238         if (saved_7F0)
5239                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5240
5241         *data = fail;
5242         return fail;
5243 }
5233
5234 /**
5235  * s2io_bist_test - invokes the MemBist test of the card .
5236  * @sp : private member of the device structure, which is a pointer to the
5237  * s2io_nic structure.
5238  * @data:variable that returns the result of each of the test conducted by
5239  * the driver.
5240  * Description:
5241  * This invokes the MemBist test of the card. We give around
5242  * 2 secs time for the Test to complete. If it's still not complete
5243  * within this period, we consider that the test failed.
5244  * Return value:
5245  * 0 on success and -1 on failure.
5246  */
5247
5248 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5249 {
5250         u8 bist = 0;
5251         int cnt = 0, ret = -1;
5252
5253         /* Kick off BIST.  PCI_BIST is an 8-bit config-space register, so
5254          * it must be accessed with byte operations; the previous word
5255          * write was misaligned and spilled past the register.
5256          */
5257         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5258         bist |= PCI_BIST_START;
5259         pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5260
5261         /* Poll for completion: 20 iterations x 100 ms = 2 s budget. */
5262         while (cnt < 20) {
5263                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5264                 if (!(bist & PCI_BIST_START)) {
5265                         /* BIST finished: report completion code (0 = pass). */
5266                         *data = (bist & PCI_BIST_CODE_MASK);
5267                         ret = 0;
5268                         break;
5269                 }
5270                 msleep(100);
5271                 cnt++;
5272         }
5273
5274         return ret;
5275 }
5270
5271 /**
5272  * s2io_link_test - verifies the link state of the nic
5273  * @sp : private member of the device structure, which is a pointer to the
5274  * s2io_nic structure.
5275  * @data: variable that returns the result of each of the test conducted by
5276  * the driver.
5277  * Description:
5278  * The function verifies the link state of the NIC and updates the input
5279  * argument 'data' appropriately.
5280  * Return value:
5281  * 0 on success.
5282  */
5283
5284 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5285 {
5286         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5287         u64 status;
5288
5289         /* Report 0 when the adapter says the link is up, 1 otherwise. */
5290         status = readq(&bar0->adapter_status);
5291         *data = LINK_IS_UP(status) ? 0 : 1;
5292
5293         return *data;
5294 }
5297
5298 /**
5299  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5300  * @sp : private member of the device structure, which is a pointer to the
5301  * s2io_nic structure.
5302  * @data : variable that returns the result of each of the tests
5303  * conducted by the driver.
5304  * Description:
5305  *  This is one of the offline test that tests the read and write
5306  *  access to the RldRam chip on the NIC.
5307  * Return value:
5308  *  0 on success.
5309  */
5310
5311 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5312 {
5313         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5314         u64 val64;
5315         int cnt, iteration = 0, test_fail = 0;
5316
5317         /* Disable ECC so single-bit errors do not abort the pattern test. */
5318         val64 = readq(&bar0->adapter_control);
5319         val64 &= ~ADAPTER_ECC_EN;
5320         writeq(val64, &bar0->adapter_control);
5321
5322         /* Put the RLDRAM controller into test mode.  SPECIAL_REG_WRITE
5323          * is used for registers that need upper/lower-half write
5324          * sequencing (UF/LF) — the order of these writes matters. */
5325         val64 = readq(&bar0->mc_rldram_test_ctrl);
5326         val64 |= MC_RLDRAM_TEST_MODE;
5327         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5328
5329         val64 = readq(&bar0->mc_rldram_mrs);
5330         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5331         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5332
5333         val64 |= MC_RLDRAM_MRS_ENABLE;
5334         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5335
5336         /* Two iterations: the second uses the bit-inverted patterns
5337          * (low 16 bits excluded from the inversion mask). */
5338         while (iteration < 2) {
5339                 val64 = 0x55555555aaaa0000ULL;
5340                 if (iteration == 1) {
5341                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5342                 }
5343                 writeq(val64, &bar0->mc_rldram_test_d0);
5344
5345                 val64 = 0xaaaa5a5555550000ULL;
5346                 if (iteration == 1) {
5347                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5348                 }
5349                 writeq(val64, &bar0->mc_rldram_test_d1);
5350
5351                 val64 = 0x55aaaaaaaa5a0000ULL;
5352                 if (iteration == 1) {
5353                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
5354                 }
5355                 writeq(val64, &bar0->mc_rldram_test_d2);
5356
5357                 /* Test address range/control word for the pattern run. */
5358                 val64 = (u64) (0x0000003ffffe0100ULL);
5359                 writeq(val64, &bar0->mc_rldram_test_add);
5360
5361                 /* Start the write pass and poll (up to 5 x 200 ms) for
5362                  * completion. */
5363                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5364                         MC_RLDRAM_TEST_GO;
5365                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5366
5367                 for (cnt = 0; cnt < 5; cnt++) {
5368                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5369                         if (val64 & MC_RLDRAM_TEST_DONE)
5370                                 break;
5371                         msleep(200);
5372                 }
5373
5374                 /* Timed out — abort; test_fail stays as-is and the
5375                  * adapter is still taken out of test mode below. */
5376                 if (cnt == 5)
5377                         break;
5378
5379                 /* Start the read-back pass and poll (up to 5 x 500 ms). */
5380                 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5381                 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5382
5383                 for (cnt = 0; cnt < 5; cnt++) {
5384                         val64 = readq(&bar0->mc_rldram_test_ctrl);
5385                         if (val64 & MC_RLDRAM_TEST_DONE)
5386                                 break;
5387                         msleep(500);
5388                 }
5389
5390                 if (cnt == 5)
5391                         break;
5392
5393                 /* Hardware compares what it read against what it wrote. */
5394                 val64 = readq(&bar0->mc_rldram_test_ctrl);
5395                 if (!(val64 & MC_RLDRAM_TEST_PASS))
5396                         test_fail = 1;
5397
5398                 iteration++;
5399         }
5400
5401         *data = test_fail;
5402
5403         /* Bring the adapter out of test mode */
5404         SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5405
5406         return test_fail;
5407 }
5395
5396 /**
5397  *  s2io_ethtool_test - conducts 5 tests to determine the health of card.
5398  *  @sp : private member of the device structure, which is a pointer to the
5399  *  s2io_nic structure.
5400  *  @ethtest : pointer to a ethtool command specific structure that will be
5401  *  returned to the user.
5402  *  @data : variable that returns the result of each of the test
5403  * conducted by the driver.
5404  * Description:
5405  *  This function conducts 5 tests (4 offline and 1 online) to determine
5406  *  the health of the card.
5407  * Return value:
5408  *  void
5409  */
5410
5411 static void s2io_ethtool_test(struct net_device *dev,
5412                               struct ethtool_test *ethtest,
5413                               uint64_t * data)
5414 {
5415         struct s2io_nic *sp = dev->priv;
5416         int orig_state = netif_running(sp->dev);
5417
5418         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5419                 /* Offline Tests.  The interface is brought down for the
5420                  * duration and restored afterwards. */
5421                 if (orig_state)
5422                         s2io_close(sp->dev);
5423
5424                 if (s2io_register_test(sp, &data[0]))
5425                         ethtest->flags |= ETH_TEST_FL_FAILED;
5426
5427                 s2io_reset(sp);
5428
5429                 if (s2io_rldram_test(sp, &data[3]))
5430                         ethtest->flags |= ETH_TEST_FL_FAILED;
5431
5432                 s2io_reset(sp);
5433
5434                 if (s2io_eeprom_test(sp, &data[1]))
5435                         ethtest->flags |= ETH_TEST_FL_FAILED;
5436
5437                 if (s2io_bist_test(sp, &data[4]))
5438                         ethtest->flags |= ETH_TEST_FL_FAILED;
5439
5440                 if (orig_state)
5441                         s2io_open(sp->dev);
5442
5443                 /* Link test (slot 2) is an online-only test. */
5444                 data[2] = 0;
5445         } else {
5446                 /* Online Tests. */
5446                 if (!orig_state) {
5447                         DBG_PRINT(ERR_DBG,
5448                                   "%s: is not up, cannot run test\n",
5449                                   dev->name);
5450                         /* Mark every slot failed and bail out.  The old
5451                          * code fell through and clobbered these error
5452                          * markers with 0 below. */
5453                         ethtest->flags |= ETH_TEST_FL_FAILED;
5454                         data[0] = -1;
5455                         data[1] = -1;
5456                         data[2] = -1;
5457                         data[3] = -1;
5458                         data[4] = -1;
5459                         return;
5460                 }
5461
5462                 if (s2io_link_test(sp, &data[2]))
5463                         ethtest->flags |= ETH_TEST_FL_FAILED;
5464
5465                 /* Offline-only tests are reported as passed (0). */
5466                 data[0] = 0;
5467                 data[1] = 0;
5468                 data[3] = 0;
5469                 data[4] = 0;
5470         }
5471 }
5465
5466 /* Fill tmp_stats[] for ETHTOOL_GSTATS.  The order of the entries below
5467  * must match ethtool_stats_keys exactly; do not reorder.  Many hardware
5468  * counters are split into a 32-bit value plus a 32-bit overflow word,
5469  * which are recombined here into one 64-bit statistic.
5470  */
5466 static void s2io_get_ethtool_stats(struct net_device *dev,
5467                                    struct ethtool_stats *estats,
5468                                    u64 * tmp_stats)
5469 {
5470         int i = 0;
5471         struct s2io_nic *sp = dev->priv;
5472         struct stat_block *stat_info = sp->mac_control.stats_info;
5473
5474         /* Sync the DMA'd hardware statistics block before reading it. */
5475         s2io_updt_stats(sp);
5476         /* Tx MAC statistics. */
5475         tmp_stats[i++] =
5476                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
5477                 le32_to_cpu(stat_info->tmac_frms);
5478         tmp_stats[i++] =
5479                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5480                 le32_to_cpu(stat_info->tmac_data_octets);
5481         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5482         tmp_stats[i++] =
5483                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5484                 le32_to_cpu(stat_info->tmac_mcst_frms);
5485         tmp_stats[i++] =
5486                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5487                 le32_to_cpu(stat_info->tmac_bcst_frms);
5488         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5489         tmp_stats[i++] =
5490                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5491                 le32_to_cpu(stat_info->tmac_ttl_octets);
5492         tmp_stats[i++] =
5493                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5494                 le32_to_cpu(stat_info->tmac_ucst_frms);
5495         tmp_stats[i++] =
5496                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5497                 le32_to_cpu(stat_info->tmac_nucst_frms);
5498         tmp_stats[i++] =
5499                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5500                 le32_to_cpu(stat_info->tmac_any_err_frms);
5501         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5502         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5503         tmp_stats[i++] =
5504                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5505                 le32_to_cpu(stat_info->tmac_vld_ip);
5506         tmp_stats[i++] =
5507                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5508                 le32_to_cpu(stat_info->tmac_drop_ip);
5509         tmp_stats[i++] =
5510                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5511                 le32_to_cpu(stat_info->tmac_icmp);
5512         tmp_stats[i++] =
5513                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5514                 le32_to_cpu(stat_info->tmac_rst_tcp);
5515         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5516         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5517                 le32_to_cpu(stat_info->tmac_udp);
5518         /* Rx MAC statistics. */
5518         tmp_stats[i++] =
5519                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5520                 le32_to_cpu(stat_info->rmac_vld_frms);
5521         tmp_stats[i++] =
5522                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5523                 le32_to_cpu(stat_info->rmac_data_octets);
5524         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5525         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5526         tmp_stats[i++] =
5527                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5528                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5529         tmp_stats[i++] =
5530                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5531                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5532         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5533         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5534         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5535         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5536         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5537         tmp_stats[i++] =
5538                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5539                 le32_to_cpu(stat_info->rmac_ttl_octets);
5540         tmp_stats[i++] =
5541                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5542                 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5543         tmp_stats[i++] =
5544                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5545                  << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5546         tmp_stats[i++] =
5547                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5548                 le32_to_cpu(stat_info->rmac_discarded_frms);
5549         tmp_stats[i++] =
5550                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5551                  << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5552         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5553         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5554         tmp_stats[i++] =
5555                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5556                 le32_to_cpu(stat_info->rmac_usized_frms);
5557         tmp_stats[i++] =
5558                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5559                 le32_to_cpu(stat_info->rmac_osized_frms);
5560         tmp_stats[i++] =
5561                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5562                 le32_to_cpu(stat_info->rmac_frag_frms);
5563         tmp_stats[i++] =
5564                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5565                 le32_to_cpu(stat_info->rmac_jabber_frms);
5566         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5567         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5568         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5569         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5570         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5571         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5572         tmp_stats[i++] =
5573                 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5574                 le32_to_cpu(stat_info->rmac_ip);
5575         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5576         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5577         tmp_stats[i++] =
5578                 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5579                 le32_to_cpu(stat_info->rmac_drop_ip);
5580         tmp_stats[i++] =
5581                 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5582                 le32_to_cpu(stat_info->rmac_icmp);
5583         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5584         tmp_stats[i++] =
5585                 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5586                 le32_to_cpu(stat_info->rmac_udp);
5587         tmp_stats[i++] =
5588                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5589                 le32_to_cpu(stat_info->rmac_err_drp_udp);
5590         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5591         /* Per-queue Rx frame and full counters. */
5591         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5592         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5593         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5594         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5595         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5596         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5597         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5598         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5599         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5600         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5601         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5602         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5603         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5604         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5605         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5606         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5607         tmp_stats[i++] =
5608                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5609                 le32_to_cpu(stat_info->rmac_pause_cnt);
5610         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5611         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5612         tmp_stats[i++] =
5613                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5614                 le32_to_cpu(stat_info->rmac_accepted_ip);
5615         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5616         /* PCI bus transaction counters. */
5616         tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5617         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5618         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5619         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5620         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5621         tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5622         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5623         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5624         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5625         tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5626         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5627         tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5628         tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5629         tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5630         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5631         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5632         tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5633         tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5634         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5635         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5636         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5637         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5638         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5639         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5640         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5641         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5642         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5643         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5644         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5645         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5646         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5647         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5648         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5649         tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5650         /* Software-maintained statistics (host byte order, no swap). */
5650         tmp_stats[i++] = 0;
5651         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5652         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5653         tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5654         tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5655         tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5656         tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5657         tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5658         /* XPAK transceiver alarm/warning counters. */
5658         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5659         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5660         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5661         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5662         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5663         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5664         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5665         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5666         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5667         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5668         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5669         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5670         tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5671         tmp_stats[i++] = stat_info->sw_stat.sending_both;
5672         tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5673         tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5674         /* Average packets per LRO aggregation (0 if none occurred). */
5674         if (stat_info->sw_stat.num_aggregations) {
5675                 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5676                 int count = 0;
5677                 /*
5678                  * Since 64-bit divide does not work on all platforms,
5679                  * do repeated subtraction.
5680                  */
5681                 while (tmp >= stat_info->sw_stat.num_aggregations) {
5682                         tmp -= stat_info->sw_stat.num_aggregations;
5683                         count++;
5684                 }
5685                 tmp_stats[i++] = count;
5686         }
5687         else
5688                 tmp_stats[i++] = 0;
5689 }
5690
5691 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5692 {
5693         /* The ethtool register dump covers the whole XENA register space. */
5694         return XENA_REG_SPACE;
5695 }
5695
5696
5697 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5698 {
5699         struct s2io_nic *nic = dev->priv;
5700
5701         /* Report whether Rx checksum offload is currently enabled. */
5702         return nic->rx_csum;
5703 }
5703
5704 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5705 {
5706         struct s2io_nic *nic = dev->priv;
5707
5708         /* Normalise the ethtool on/off request to a 0/1 flag. */
5709         nic->rx_csum = data ? 1 : 0;
5710
5711         return 0;
5712 }
5715
5716 static int s2io_get_eeprom_len(struct net_device *dev)
5717 {
5718         /* Size of the EEPROM exposed through ethtool -e. */
5719         return XENA_EEPROM_SPACE;
5720 }
5720
5721 static int s2io_ethtool_self_test_count(struct net_device *dev)
5722 {
5723         /* Number of self-test result slots reported to ethtool. */
5724         return S2IO_TEST_LEN;
5725 }
5725
5726 static void s2io_ethtool_get_strings(struct net_device *dev,
5727                                      u32 stringset, u8 * data)
5728 {
5729         /* Copy out the name table matching the requested string set. */
5730         switch (stringset) {
5731         case ETH_SS_TEST:
5732                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5733                 break;
5734         case ETH_SS_STATS:
5735                 memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
5736                 break;
5737         }
5738 }
5738 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5739 {
5740         /* Number of statistics entries reported to ethtool. */
5741         return S2IO_STAT_LEN;
5742 }
5742
5743 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5744 {
5745         /* Toggle IPv4 Tx checksum offload in the device feature flags. */
5746         if (data)
5747                 dev->features |= NETIF_F_IP_CSUM;
5748         else
5749                 dev->features &= ~NETIF_F_IP_CSUM;
5750
5751         return 0;
5752 }
5752
5753 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5754 {
5755         /* Non-zero when TCP segmentation offload is enabled. */
5756         return !!(dev->features & NETIF_F_TSO);
5757 }
5757 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5758 {
5759         /* Enable or disable TSO for both IPv4 and IPv6 together. */
5760         if (data)
5761                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5762         else
5763                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5764
5765         return 0;
5766 }
5766
5767 /* ethtool entry points for this driver; generic operations (link state,
5768  * tx csum query, sg, ufo) are delegated to the ethtool core helpers.
5769  */
5767 static const struct ethtool_ops netdev_ethtool_ops = {
5768         .get_settings = s2io_ethtool_gset,
5769         .set_settings = s2io_ethtool_sset,
5770         .get_drvinfo = s2io_ethtool_gdrvinfo,
5771         .get_regs_len = s2io_ethtool_get_regs_len,
5772         .get_regs = s2io_ethtool_gregs,
5773         .get_link = ethtool_op_get_link,
5774         .get_eeprom_len = s2io_get_eeprom_len,
5775         .get_eeprom = s2io_ethtool_geeprom,
5776         .set_eeprom = s2io_ethtool_seeprom,
5777         .get_pauseparam = s2io_ethtool_getpause_data,
5778         .set_pauseparam = s2io_ethtool_setpause_data,
5779         .get_rx_csum = s2io_ethtool_get_rx_csum,
5780         .set_rx_csum = s2io_ethtool_set_rx_csum,
5781         .get_tx_csum = ethtool_op_get_tx_csum,
5782         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5783         .get_sg = ethtool_op_get_sg,
5784         .set_sg = ethtool_op_set_sg,
5785         .get_tso = s2io_ethtool_op_get_tso,
5786         .set_tso = s2io_ethtool_op_set_tso,
5787         .get_ufo = ethtool_op_get_ufo,
5788         .set_ufo = ethtool_op_set_ufo,
5789         .self_test_count = s2io_ethtool_self_test_count,
5790         .self_test = s2io_ethtool_test,
5791         .get_strings = s2io_ethtool_get_strings,
5792         .phys_id = s2io_ethtool_idnic,
5793         .get_stats_count = s2io_ethtool_get_stats_count,
5794         .get_ethtool_stats = s2io_get_ethtool_stats
5795 };
5796
5797 /**
5798  *  s2io_ioctl - Entry point for the Ioctl
5799  *  @dev :  Device pointer.
5800  *  @ifr :  An IOCTL specific structure, that can contain a pointer to
5801  *  a proprietary structure used to pass information to the driver.
5802  *  @cmd :  This is used to distinguish between the different commands that
5803  *  can be passed to the IOCTL functions.
5804  *  Description:
5805  *  Currently there is no special functionality supported in IOCTL, hence
5806  *  the function always returns -EOPNOTSUPP.
5807  */
5808
5809 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5810 {
5811         /* No private ioctls are implemented for this device. */
5812         return -EOPNOTSUPP;
5813 }
5813
5814 /**
5815  *  s2io_change_mtu - entry point to change MTU size for the device.
5816  *   @dev : device pointer.
5817  *   @new_mtu : the new MTU size for the device.
5818  *   Description: A driver entry point to change MTU size for the device.
5819  *   Before changing the MTU the device must be stopped.
5820  *  Return value:
5821  *   0 on success and an appropriate (-)ve integer as defined in errno.h
5822  *   file on failure.
5823  */
5824
5825 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5826 {
5827         struct s2io_nic *sp = dev->priv;
5828         int ret = 0;
5829
5830         /* NOTE(review): -EINVAL would be the conventional errno for a bad
5831          * MTU; -EPERM is kept since callers may only test for non-zero. */
5832         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5833                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5834                           dev->name);
5835                 return -EPERM;
5836         }
5837
5838         dev->mtu = new_mtu;
5839         if (netif_running(dev)) {
5840                 /* The card must be cycled for the new MTU to take effect. */
5841                 s2io_card_down(sp);
5842                 netif_stop_queue(dev);
5843                 ret = s2io_card_up(sp);
5844                 if (ret) {
5845                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5846                                   __FUNCTION__);
5847                         /* Propagate the failure instead of silently
5848                          * returning success with a dead interface. */
5849                         return ret;
5850                 }
5851                 if (netif_queue_stopped(dev))
5852                         netif_wake_queue(dev);
5853         } else { /* Device is down */
5854                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5855                 u64 val64 = new_mtu;
5856
5857                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5858         }
5859
5860         return ret;
5861 }
5854
5855 /**
5856  *  s2io_tasklet - Bottom half of the ISR.
5857  *  @dev_adr : address of the device structure in dma_addr_t format.
5858  *  Description:
5859  *  This is the tasklet or the bottom half of the ISR. This is
5860  *  an extension of the ISR which is scheduled by the scheduler to be run
5861  *  when the load on the CPU is low. All low priority tasks of the ISR can
5862  *  be pushed into the tasklet. For now the tasklet is used only to
5863  *  replenish the Rx buffers in the Rx buffer descriptors.
5864  *  Return value:
5865  *  void.
5866  */
5867
static void s2io_tasklet(unsigned long dev_addr)
{
	/* Bottom half of the ISR: replenish the Rx buffer descriptors of
	 * every receive ring.  @dev_addr is the net_device pointer cast to
	 * an unsigned long by tasklet_init() in s2io_card_up(). */
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	int i, ret;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* TASKLET_IN_USE presumably test-and-sets bit 0 of
	 * sp->tasklet_status (cleared below) so only one instance runs the
	 * refill at a time — TODO confirm against the macro definition. */
	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			/* Top up ring i with fresh Rx buffers; stop on
			 * allocation failure or when the ring is full. */
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				/* -EFILL is a driver-private code: the ring
				 * has no free descriptors right now. */
				DBG_PRINT(ERR_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		/* Release the in-use flag taken by TASKLET_IN_USE. */
		clear_bit(0, (&sp->tasklet_status));
	}
}
5897
5898 /**
5899  * s2io_set_link - Set the LInk status
5900  * @data: long pointer to device private structue
5901  * Description: Sets the link status for the adapter
5902  */
5903
5904 static void s2io_set_link(struct work_struct *work)
5905 {
5906         struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
5907         struct net_device *dev = nic->dev;
5908         struct XENA_dev_config __iomem *bar0 = nic->bar0;
5909         register u64 val64;
5910         u16 subid;
5911
5912         rtnl_lock();
5913
5914         if (!netif_running(dev))
5915                 goto out_unlock;
5916
5917         if (test_and_set_bit(0, &(nic->link_state))) {
5918                 /* The card is being reset, no point doing anything */
5919                 goto out_unlock;
5920         }
5921
5922         subid = nic->pdev->subsystem_device;
5923         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5924                 /*
5925                  * Allow a small delay for the NICs self initiated
5926                  * cleanup to complete.
5927                  */
5928                 msleep(100);
5929         }
5930
5931         val64 = readq(&bar0->adapter_status);
5932         if (LINK_IS_UP(val64)) {
5933                 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5934                         if (verify_xena_quiescence(nic)) {
5935                                 val64 = readq(&bar0->adapter_control);
5936                                 val64 |= ADAPTER_CNTL_EN;
5937                                 writeq(val64, &bar0->adapter_control);
5938                                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5939                                         nic->device_type, subid)) {
5940                                         val64 = readq(&bar0->gpio_control);
5941                                         val64 |= GPIO_CTRL_GPIO_0;
5942                                         writeq(val64, &bar0->gpio_control);
5943                                         val64 = readq(&bar0->gpio_control);
5944                                 } else {
5945                                         val64 |= ADAPTER_LED_ON;
5946                                         writeq(val64, &bar0->adapter_control);
5947                                 }
5948                                 nic->device_enabled_once = TRUE;
5949                         } else {
5950                                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5951                                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5952                                 netif_stop_queue(dev);
5953                         }
5954                 }
5955                 val64 = readq(&bar0->adapter_status);
5956                 if (!LINK_IS_UP(val64)) {
5957                         DBG_PRINT(ERR_DBG, "%s:", dev->name);
5958                         DBG_PRINT(ERR_DBG, " Link down after enabling ");
5959                         DBG_PRINT(ERR_DBG, "device \n");
5960                 } else
5961                         s2io_link(nic, LINK_UP);
5962         } else {
5963                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5964                                                       subid)) {
5965                         val64 = readq(&bar0->gpio_control);
5966                         val64 &= ~GPIO_CTRL_GPIO_0;
5967                         writeq(val64, &bar0->gpio_control);
5968                         val64 = readq(&bar0->gpio_control);
5969                 }
5970                 s2io_link(nic, LINK_DOWN);
5971         }
5972         clear_bit(0, &(nic->link_state));
5973
5974 out_unlock:
5975         rtnl_lock();
5976 }
5977
/*
 * set_rxd_buffer_pointer - attach DMA-mapped receive buffers to one RxD.
 * @sp:    device private structure
 * @rxdp:  receive descriptor to (re)populate
 * @ba:    buffer-address bookkeeping (ba_0/ba_1) used in 3-buffer modes
 * @skb:   in/out skb pointer, cached across calls so that consecutive
 *         descriptors with no attached skb can reuse one mapping
 * @temp0: in/out cached DMA address of buffer 0
 * @temp1: in/out cached DMA address of buffer 1
 * @temp2: in/out cached DMA address of buffer 2
 * @size:  skb allocation size for the current ring mode
 *
 * Only descriptors whose Host_Control is 0 (no skb currently attached)
 * are touched.  Returns 0 on success, -ENOMEM if an skb (or the
 * frag_list skb in 3-buffer mode) cannot be allocated.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct sk_buff *frag_list;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
				return -ENOMEM ;
			}
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the previously cached mappings. */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
					dev->name);
				return -ENOMEM;
			}
			/* Buffer 2 carries the payload (MTU + 4). */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			/* Buffer 0 receives the Ethernet header into the
			 * driver-owned ba_0 area. */
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
		}
	} else if ((rxdp->Host_Control == 0)) {
		/* Three buffer mode */
		if (*skb) {
			/* Reuse the previously cached mappings. */
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
					  dev->name);
				return -ENOMEM;
			}
			/* Buffer 0: Ethernet header into ba_0. */
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			/* Buffer-1 receives L3/L4 headers */
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
				pci_map_single( sp->pdev, (*skb)->data,
						l3l4hdr_size + 4,
						PCI_DMA_FROMDEVICE);
			/*
			 * skb_shinfo(skb)->frag_list will have L4
			 * data payload
			 */
			skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
								   ALIGN_SIZE);
			if (skb_shinfo(*skb)->frag_list == NULL) {
				/* NOTE(review): the mappings created above
				 * are not undone on this failure path;
				 * callers appear to tear the ring down
				 * afterwards — confirm before reuse. */
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
					  failed\n ", dev->name);
				return -ENOMEM ;
			}
			frag_list = skb_shinfo(*skb)->frag_list;
			frag_list->next = NULL;
			/*
			 * Buffer-2 receives L4 data payload
			 */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
				pci_map_single( sp->pdev, frag_list->data,
						dev->mtu, PCI_DMA_FROMDEVICE);
		}
	}
	return 0;
}
6084 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6085                                 int size)
6086 {
6087         struct net_device *dev = sp->dev;
6088         if (sp->rxd_mode == RXD_MODE_1) {
6089                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6090         } else if (sp->rxd_mode == RXD_MODE_3B) {
6091                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6092                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6093                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6094         } else {
6095                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6096                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6097                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6098         }
6099 }
6100
6101 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6102 {
6103         int i, j, k, blk_cnt = 0, size;
6104         struct mac_info * mac_control = &sp->mac_control;
6105         struct config_param *config = &sp->config;
6106         struct net_device *dev = sp->dev;
6107         struct RxD_t *rxdp = NULL;
6108         struct sk_buff *skb = NULL;
6109         struct buffAdd *ba = NULL;
6110         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6111
6112         /* Calculate the size based on ring mode */
6113         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6114                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6115         if (sp->rxd_mode == RXD_MODE_1)
6116                 size += NET_IP_ALIGN;
6117         else if (sp->rxd_mode == RXD_MODE_3B)
6118                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6119         else
6120                 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6121
6122         for (i = 0; i < config->rx_ring_num; i++) {
6123                 blk_cnt = config->rx_cfg[i].num_rxd /
6124                         (rxd_count[sp->rxd_mode] +1);
6125
6126                 for (j = 0; j < blk_cnt; j++) {
6127                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6128                                 rxdp = mac_control->rings[i].
6129                                         rx_blocks[j].rxds[k].virt_addr;
6130                                 if(sp->rxd_mode >= RXD_MODE_3A)
6131                                         ba = &mac_control->rings[i].ba[j][k];
6132                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6133                                                        &skb,(u64 *)&temp0_64,
6134                                                        (u64 *)&temp1_64,
6135                                                        (u64 *)&temp2_64,
6136                                                         size) == ENOMEM) {
6137                                         return 0;
6138                                 }
6139
6140                                 set_rxd_buffer_size(sp, rxdp, size);
6141                                 wmb();
6142                                 /* flip the Ownership bit to Hardware */
6143                                 rxdp->Control_1 |= RXD_OWN_XENA;
6144                         }
6145                 }
6146         }
6147         return 0;
6148
6149 }
6150
/*
 * s2io_add_isr - enable the configured interrupt mode and register ISRs.
 * @sp: device private structure
 *
 * Tries MSI or MSI-X as configured, falling back to legacy INTA if the
 * enable step fails, then registers the matching interrupt handler(s).
 * Returns 0 on success, -1 on registration failure.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->intr_type == MSI)
		ret = s2io_enable_msi(sp);
	else if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* Could not enable the requested mode: fall back to the
		 * legacy INTA interrupt. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI) {
		err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
			IRQF_SHARED, sp->name, dev);
		if (err) {
			pci_disable_msi(sp->pdev);
			DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
				  dev->name);
			return -1;
		}
	}
	if (sp->intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Entry 0 is the alarm vector; data vectors start at 1 and
		 * run until the first entry not marked in-use. */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				/* Tx FIFO vector. */
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				/* Rx ring vector. */
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/* Vectors registered before this point keep
				 * in_use == MSIX_REGISTERED_SUCCESS, so the
				 * caller's s2io_rem_isr() can free them. */
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				return -1;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
6243 static void s2io_rem_isr(struct s2io_nic * sp)
6244 {
6245         int cnt = 0;
6246         struct net_device *dev = sp->dev;
6247
6248         if (sp->intr_type == MSI_X) {
6249                 int i;
6250                 u16 msi_control;
6251
6252                 for (i=1; (sp->s2io_entries[i].in_use ==
6253                         MSIX_REGISTERED_SUCCESS); i++) {
6254                         int vector = sp->entries[i].vector;
6255                         void *arg = sp->s2io_entries[i].arg;
6256
6257                         free_irq(vector, arg);
6258                 }
6259                 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6260                 msi_control &= 0xFFFE; /* Disable MSI */
6261                 pci_write_config_word(sp->pdev, 0x42, msi_control);
6262
6263                 pci_disable_msix(sp->pdev);
6264         } else {
6265                 free_irq(sp->pdev->irq, dev);
6266                 if (sp->intr_type == MSI) {
6267                         u16 val;
6268
6269                         pci_disable_msi(sp->pdev);
6270                         pci_read_config_word(sp->pdev, 0x4c, &val);
6271                         val ^= 0x1;
6272                         pci_write_config_word(sp->pdev, 0x4c, val);
6273                 }
6274         }
6275         /* Waiting till all Interrupt handlers are complete */
6276         cnt = 0;
6277         do {
6278                 msleep(10);
6279                 if (!atomic_read(&sp->isr_cnt))
6280                         break;
6281                 cnt++;
6282         } while(cnt < 5);
6283 }
6284
/*
 * s2io_card_down - bring the adapter down.
 * @sp: device private structure
 *
 * Stops the alarm timer, serializes against the link task via the
 * link_state bit, halts NIC traffic, removes ISRs and the refill tasklet,
 * waits for the hardware to quiesce, resets it, and frees all Tx/Rx
 * buffers under the respective locks.
 */
static void s2io_card_down(struct s2io_nic * sp)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
		msleep(50);
	}
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */
	stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	do {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump.  Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of each rxd in every Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode.
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* Give up after ~500ms and reset anyway, logging the
		 * raw adapter status for diagnosis. */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	} while (1);
	s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	/* Allow the link task to run again. */
	clear_bit(0, &(sp->link_state));
}
6348
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure
 *
 * Initializes the hardware, fills the Rx rings, restores the receive
 * mode, starts the NIC, registers ISRs, arms the alarm timer and refill
 * tasklet, and enables interrupts.  Returns 0 on success or a negative
 * errno; on every failure path the NIC is reset and Rx buffers freed.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		s2io_reset(sp);
		return -ENODEV;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* For MSI-X, some vectors may already be registered;
		 * s2io_rem_isr() frees them. */
		if (sp->intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Fire the alarm handler every HZ/2 ticks. */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	if (sp->intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}


	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}
6438
6439 /**
6440  * s2io_restart_nic - Resets the NIC.
6441  * @data : long pointer to the device private structure
6442  * Description:
6443  * This function is scheduled to be run by the s2io_tx_watchdog
6444  * function after 0.5 secs to reset the NIC. The idea is to reduce
6445  * the run time of the watch dog routine which is run holding a
6446  * spin lock.
6447  */
6448
6449 static void s2io_restart_nic(struct work_struct *work)
6450 {
6451         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6452         struct net_device *dev = sp->dev;
6453
6454         rtnl_lock();
6455
6456         if (!netif_running(dev))
6457                 goto out_unlock;
6458
6459         s2io_card_down(sp);
6460         if (s2io_card_up(sp)) {
6461                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6462                           dev->name);
6463         }
6464         netif_wake_queue(dev);
6465         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6466                   dev->name);
6467 out_unlock:
6468         rtnl_unlock();
6469 }
6470
6471 /**
6472  *  s2io_tx_watchdog - Watchdog for transmit side.
6473  *  @dev : Pointer to net device structure
6474  *  Description:
6475  *  This function is triggered if the Tx Queue is stopped
6476  *  for a pre-defined amount of time when the Interface is still up.
6477  *  If the Interface is jammed in such a situation, the hardware is
6478  *  reset (by s2io_close) and restarted again (by s2io_open) to
6479  *  overcome any problem that might have been caused in the hardware.
6480  *  Return value:
6481  *  void
6482  */
6483
6484 static void s2io_tx_watchdog(struct net_device *dev)
6485 {
6486         struct s2io_nic *sp = dev->priv;
6487
6488         if (netif_carrier_ok(dev)) {
6489                 schedule_work(&sp->rst_timer_task);
6490                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6491         }
6492 }
6493
6494 /**
6495  *   rx_osm_handler - To perform some OS related operations on SKB.
6496  *   @sp: private member of the device structure,pointer to s2io_nic structure.
6497  *   @skb : the socket buffer pointer.
6498  *   @len : length of the packet
6499  *   @cksum : FCS checksum of the frame.
6500  *   @ring_no : the ring from which this RxD was extracted.
6501  *   Description:
 *   This function is called by the Rx interrupt service routine to perform
6503  *   some OS related operations on the SKB before passing it to the upper
6504  *   layers. It mainly checks if the checksum is OK, if so adds it to the
6505  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
6506  *   to the upper layer. If the checksum is wrong, it increments the Rx
6507  *   packet error count, frees the SKB and returns error.
6508  *   Return value:
6509  *   SUCCESS on success and -1 on failure.
6510  */
6511 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6512 {
6513         struct s2io_nic *sp = ring_data->nic;
6514         struct net_device *dev = (struct net_device *) sp->dev;
6515         struct sk_buff *skb = (struct sk_buff *)
6516                 ((unsigned long) rxdp->Host_Control);
6517         int ring_no = ring_data->ring_no;
6518         u16 l3_csum, l4_csum;
6519         unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6520         struct lro *lro;
6521
6522         skb->dev = dev;
6523
6524         if (err) {
6525                 /* Check for parity error */
6526                 if (err & 0x1) {
6527                         sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6528                 }
6529
6530                 /*
6531                 * Drop the packet if bad transfer code. Exception being
6532                 * 0x5, which could be due to unsupported IPv6 extension header.
6533                 * In this case, we let stack handle the packet.
6534                 * Note that in this case, since checksum will be incorrect,
6535                 * stack will validate the same.
6536                 */
6537                 if (err && ((err >> 48) != 0x5)) {
6538                         DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6539                                 dev->name, err);
6540                         sp->stats.rx_crc_errors++;
6541                         dev_kfree_skb(skb);
6542                         atomic_dec(&sp->rx_bufs_left[ring_no]);
6543                         rxdp->Host_Control = 0;
6544                         return 0;
6545                 }
6546         }
6547
6548         /* Updating statistics */
6549         rxdp->Host_Control = 0;
6550         sp->rx_pkt_count++;
6551         sp->stats.rx_packets++;
6552         if (sp->rxd_mode == RXD_MODE_1) {
6553                 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6554
6555                 sp->stats.rx_bytes += len;
6556                 skb_put(skb, len);
6557
6558         } else if (sp->rxd_mode >= RXD_MODE_3A) {
6559                 int get_block = ring_data->rx_curr_get_info.block_index;
6560                 int get_off = ring_data->rx_curr_get_info.offset;
6561                 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6562                 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6563                 unsigned char *buff = skb_push(skb, buf0_len);
6564
6565                 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6566                 sp->stats.rx_bytes += buf0_len + buf2_len;
6567                 memcpy(buff, ba->ba_0, buf0_len);
6568
6569                 if (sp->rxd_mode == RXD_MODE_3A) {
6570                         int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6571
6572                         skb_put(skb, buf1_len);
6573                         skb->len += buf2_len;
6574                         skb->data_len += buf2_len;
6575                         skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6576                         sp->stats.rx_bytes += buf1_len;
6577
6578                 } else
6579                         skb_put(skb, buf2_len);
6580         }
6581
6582         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6583             (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6584             (sp->rx_csum)) {
6585                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6586                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6587                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6588                         /*
6589                          * NIC verifies if the Checksum of the received
6590                          * frame is Ok or not and accordingly returns
6591                          * a flag in the RxD.
6592                          */
6593                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6594                         if (sp->lro) {
6595                                 u32 tcp_len;
6596                                 u8 *tcp;
6597                                 int ret = 0;
6598
6599                                 ret = s2io_club_tcp_session(skb->data, &tcp,
6600                                                 &tcp_len, &lro, rxdp, sp);
6601                                 switch (ret) {
6602                                         case 3: /* Begin anew */
6603                                                 lro->parent = skb;
6604                                                 goto aggregate;
6605                                         case 1: /* Aggregate */
6606                                         {
6607                                                 lro_append_pkt(sp, lro,
6608                                                         skb, tcp_len);
6609                                                 goto aggregate;
6610                                         }
6611                                         case 4: /* Flush session */
6612                                         {
6613                                                 lro_append_pkt(sp, lro,
6614                                                         skb, tcp_len);
6615                                                 queue_rx_frame(lro->parent);
6616                                                 clear_lro_session(lro);
6617                                                 sp->mac_control.stats_info->
6618                                                     sw_stat.flush_max_pkts++;
6619                                                 goto aggregate;
6620                                         }
6621                                         case 2: /* Flush both */
6622                                                 lro->parent->data_len =
6623                                                         lro->frags_len;
6624                                                 sp->mac_control.stats_info->
6625                                                      sw_stat.sending_both++;
6626                                                 queue_rx_frame(lro->parent);
6627                                                 clear_lro_session(lro);
6628                                                 goto send_up;
6629                                         case 0: /* sessions exceeded */
6630                                         case -1: /* non-TCP or not
6631                                                   * L2 aggregatable
6632                                                   */
6633                                         case 5: /*
6634                                                  * First pkt in session not
6635                                                  * L3/L4 aggregatable
6636                                                  */
6637                                                 break;
6638                                         default:
6639                                                 DBG_PRINT(ERR_DBG,
6640                                                         "%s: Samadhana!!\n",
6641                                                          __FUNCTION__);
6642                                                 BUG();
6643                                 }
6644                         }
6645                 } else {
6646                         /*
6647                          * Packet with erroneous checksum, let the
6648                          * upper layers deal with it.
6649                          */
6650                         skb->ip_summed = CHECKSUM_NONE;
6651                 }
6652         } else {
6653                 skb->ip_summed = CHECKSUM_NONE;
6654         }
6655
6656         if (!sp->lro) {
6657                 skb->protocol = eth_type_trans(skb, dev);
6658                 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6659                         vlan_strip_flag)) {
6660                         /* Queueing the vlan frame to the upper layer */
6661                         if (napi)
6662                                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6663                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
6664                         else
6665                                 vlan_hwaccel_rx(skb, sp->vlgrp,
6666                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
6667                 } else {
6668                         if (napi)
6669                                 netif_receive_skb(skb);
6670                         else
6671                                 netif_rx(skb);
6672                 }
6673         } else {
6674 send_up:
6675                 queue_rx_frame(skb);
6676         }
6677         dev->last_rx = jiffies;
6678 aggregate:
6679         atomic_dec(&sp->rx_bufs_left[ring_no]);
6680         return SUCCESS;
6681 }
6682
6683 /**
6684  *  s2io_link - stops/starts the Tx queue.
6685  *  @sp : private member of the device structure, which is a pointer to the
6686  *  s2io_nic structure.
6687  *  @link : inidicates whether link is UP/DOWN.
6688  *  Description:
6689  *  This function stops/starts the Tx queue depending on whether the link
6690  *  status of the NIC is is down or up. This is called by the Alarm
6691  *  interrupt handler whenever a link change interrupt comes up.
6692  *  Return value:
6693  *  void.
6694  */
6695
6696 static void s2io_link(struct s2io_nic * sp, int link)
6697 {
6698         struct net_device *dev = (struct net_device *) sp->dev;
6699
6700         if (link != sp->last_link_state) {
6701                 if (link == LINK_DOWN) {
6702                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6703                         netif_carrier_off(dev);
6704                 } else {
6705                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6706                         netif_carrier_on(dev);
6707                 }
6708         }
6709         sp->last_link_state = link;
6710 }
6711
6712 /**
6713  *  get_xena_rev_id - to identify revision ID of xena.
6714  *  @pdev : PCI Dev structure
6715  *  Description:
6716  *  Function to identify the Revision ID of xena.
6717  *  Return value:
6718  *  returns the revision ID of the device.
6719  */
6720
6721 static int get_xena_rev_id(struct pci_dev *pdev)
6722 {
6723         u8 id = 0;
6724         int ret;
6725         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6726         return id;
6727 }
6728
6729 /**
6730  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6731  *  @sp : private member of the device structure, which is a pointer to the
6732  *  s2io_nic structure.
6733  *  Description:
6734  *  This function initializes a few of the PCI and PCI-X configuration registers
6735  *  with recommended values.
6736  *  Return value:
6737  *  void
6738  */
6739
6740 static void s2io_init_pci(struct s2io_nic * sp)
6741 {
6742         u16 pci_cmd = 0, pcix_cmd = 0;
6743
6744         /* Enable Data Parity Error Recovery in PCI-X command register. */
6745         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6746                              &(pcix_cmd));
6747         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6748                               (pcix_cmd | 1));
6749         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6750                              &(pcix_cmd));
6751
6752         /* Set the PErr Response bit in PCI command register. */
6753         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6754         pci_write_config_word(sp->pdev, PCI_COMMAND,
6755                               (pci_cmd | PCI_COMMAND_PARITY));
6756         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6757 }
6758
6759 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6760 {
6761         if ( tx_fifo_num > 8) {
6762                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6763                          "supported\n");
6764                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6765                 tx_fifo_num = 8;
6766         }
6767         if ( rx_ring_num > 8) {
6768                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6769                          "supported\n");
6770                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6771                 rx_ring_num = 8;
6772         }
6773         if (*dev_intr_type != INTA)
6774                 napi = 0;
6775
6776 #ifndef CONFIG_PCI_MSI
6777         if (*dev_intr_type != INTA) {
6778                 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6779                           "MSI/MSI-X. Defaulting to INTA\n");
6780                 *dev_intr_type = INTA;
6781         }
6782 #else
6783         if (*dev_intr_type > MSI_X) {
6784                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6785                           "Defaulting to INTA\n");
6786                 *dev_intr_type = INTA;
6787         }
6788 #endif
6789         if ((*dev_intr_type == MSI_X) &&
6790                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6791                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6792                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6793                                         "Defaulting to INTA\n");
6794                 *dev_intr_type = INTA;
6795         }
6796
6797         if (rx_ring_mode > 3) {
6798                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6799                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6800                 rx_ring_mode = 3;
6801         }
6802         return SUCCESS;
6803 }
6804
6805 /**
6806  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6807  * or Traffic class respectively.
6808  * @nic: device peivate variable
6809  * Description: The function configures the receive steering to
6810  * desired receive ring.
6811  * Return Value:  SUCCESS on success and
6812  * '-1' on failure (endian settings incorrect).
6813  */
6814 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6815 {
6816         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6817         register u64 val64 = 0;
6818
6819         if (ds_codepoint > 63)
6820                 return FAILURE;
6821
6822         val64 = RTS_DS_MEM_DATA(ring);
6823         writeq(val64, &bar0->rts_ds_mem_data);
6824
6825         val64 = RTS_DS_MEM_CTRL_WE |
6826                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6827                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6828
6829         writeq(val64, &bar0->rts_ds_mem_ctrl);
6830
6831         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6832                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6833                                 S2IO_BIT_RESET);
6834 }
6835
6836 /**
6837  *  s2io_init_nic - Initialization of the adapter .
6838  *  @pdev : structure containing the PCI related information of the device.
6839  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6840  *  Description:
6841  *  The function initializes an adapter identified by the pci_dec structure.
6842  *  All OS related initialization including memory and device structure and
6843  *  initlaization of the device private variable is done. Also the swapper
6844  *  control register is initialized to enable read and write into the I/O
6845  *  registers of the device.
6846  *  Return value:
6847  *  returns 0 on success and negative on failure.
6848  */
6849
6850 static int __devinit
6851 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6852 {
6853         struct s2io_nic *sp;
6854         struct net_device *dev;
6855         int i, j, ret;
6856         int dma_flag = FALSE;
6857         u32 mac_up, mac_down;
6858         u64 val64 = 0, tmp64 = 0;
6859         struct XENA_dev_config __iomem *bar0 = NULL;
6860         u16 subid;
6861         struct mac_info *mac_control;
6862         struct config_param *config;
6863         int mode;
6864         u8 dev_intr_type = intr_type;
6865
6866         if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6867                 return ret;
6868
6869         if ((ret = pci_enable_device(pdev))) {
6870                 DBG_PRINT(ERR_DBG,
6871                           "s2io_init_nic: pci_enable_device failed\n");
6872                 return ret;
6873         }
6874
6875         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6876                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6877                 dma_flag = TRUE;
6878                 if (pci_set_consistent_dma_mask
6879                     (pdev, DMA_64BIT_MASK)) {
6880                         DBG_PRINT(ERR_DBG,
6881                                   "Unable to obtain 64bit DMA for \
6882                                         consistent allocations\n");
6883                         pci_disable_device(pdev);
6884                         return -ENOMEM;
6885                 }
6886         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6887                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6888         } else {
6889                 pci_disable_device(pdev);
6890                 return -ENOMEM;
6891         }
6892         if (dev_intr_type != MSI_X) {
6893                 if (pci_request_regions(pdev, s2io_driver_name)) {
6894                         DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6895                         pci_disable_device(pdev);
6896                         return -ENODEV;
6897                 }
6898         }
6899         else {
6900                 if (!(request_mem_region(pci_resource_start(pdev, 0),
6901                          pci_resource_len(pdev, 0), s2io_driver_name))) {
6902                         DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6903                         pci_disable_device(pdev);
6904                         return -ENODEV;
6905                 }
6906                 if (!(request_mem_region(pci_resource_start(pdev, 2),
6907                          pci_resource_len(pdev, 2), s2io_driver_name))) {
6908                         DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6909                         release_mem_region(pci_resource_start(pdev, 0),
6910                                    pci_resource_len(pdev, 0));
6911                         pci_disable_device(pdev);
6912                         return -ENODEV;
6913                 }
6914         }
6915
6916         dev = alloc_etherdev(sizeof(struct s2io_nic));
6917         if (dev == NULL) {
6918                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6919                 pci_disable_device(pdev);
6920                 pci_release_regions(pdev);
6921                 return -ENODEV;
6922         }
6923
6924         pci_set_master(pdev);
6925         pci_set_drvdata(pdev, dev);
6926         SET_MODULE_OWNER(dev);
6927         SET_NETDEV_DEV(dev, &pdev->dev);
6928
6929         /*  Private member variable initialized to s2io NIC structure */
6930         sp = dev->priv;
6931         memset(sp, 0, sizeof(struct s2io_nic));
6932         sp->dev = dev;
6933         sp->pdev = pdev;
6934         sp->high_dma_flag = dma_flag;
6935         sp->device_enabled_once = FALSE;
6936         if (rx_ring_mode == 1)
6937                 sp->rxd_mode = RXD_MODE_1;
6938         if (rx_ring_mode == 2)
6939                 sp->rxd_mode = RXD_MODE_3B;
6940         if (rx_ring_mode == 3)
6941                 sp->rxd_mode = RXD_MODE_3A;
6942
6943         sp->intr_type = dev_intr_type;
6944
6945         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6946                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6947                 sp->device_type = XFRAME_II_DEVICE;
6948         else
6949                 sp->device_type = XFRAME_I_DEVICE;
6950
6951         sp->lro = lro;
6952
6953         /* Initialize some PCI/PCI-X fields of the NIC. */
6954         s2io_init_pci(sp);
6955
6956         /*
6957          * Setting the device configuration parameters.
6958          * Most of these parameters can be specified by the user during
6959          * module insertion as they are module loadable parameters. If
6960          * these parameters are not not specified during load time, they
6961          * are initialized with default values.
6962          */
6963         mac_control = &sp->mac_control;
6964         config = &sp->config;
6965
6966         /* Tx side parameters. */
6967         config->tx_fifo_num = tx_fifo_num;
6968         for (i = 0; i < MAX_TX_FIFOS; i++) {
6969                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6970                 config->tx_cfg[i].fifo_priority = i;
6971         }
6972
6973         /* mapping the QoS priority to the configured fifos */
6974         for (i = 0; i < MAX_TX_FIFOS; i++)
6975                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
6976
6977         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6978         for (i = 0; i < config->tx_fifo_num; i++) {
6979                 config->tx_cfg[i].f_no_snoop =
6980                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
6981                 if (config->tx_cfg[i].fifo_len < 65) {
6982                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6983                         break;
6984                 }
6985         }
6986         /* + 2 because one Txd for skb->data and one Txd for UFO */
6987         config->max_txds = MAX_SKB_FRAGS + 2;
6988
6989         /* Rx side parameters. */
6990         config->rx_ring_num = rx_ring_num;
6991         for (i = 0; i < MAX_RX_RINGS; i++) {
6992                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
6993                     (rxd_count[sp->rxd_mode] + 1);
6994                 config->rx_cfg[i].ring_priority = i;
6995         }
6996
6997         for (i = 0; i < rx_ring_num; i++) {
6998                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6999                 config->rx_cfg[i].f_no_snoop =
7000                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7001         }
7002
7003         /*  Setting Mac Control parameters */
7004         mac_control->rmac_pause_time = rmac_pause_time;
7005         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7006         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7007
7008
7009         /* Initialize Ring buffer parameters. */
7010         for (i = 0; i < config->rx_ring_num; i++)
7011                 atomic_set(&sp->rx_bufs_left[i], 0);
7012
7013         /* Initialize the number of ISRs currently running */
7014         atomic_set(&sp->isr_cnt, 0);
7015
7016         /*  initialize the shared memory used by the NIC and the host */
7017         if (init_shared_mem(sp)) {
7018                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7019                           dev->name);
7020                 ret = -ENOMEM;
7021                 goto mem_alloc_failed;
7022         }
7023
7024         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7025                                      pci_resource_len(pdev, 0));
7026         if (!sp->bar0) {
7027                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7028                           dev->name);
7029                 ret = -ENOMEM;
7030                 goto bar0_remap_failed;
7031         }
7032
7033         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7034                                      pci_resource_len(pdev, 2));
7035         if (!sp->bar1) {
7036                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7037                           dev->name);
7038                 ret = -ENOMEM;
7039                 goto bar1_remap_failed;
7040         }
7041
7042         dev->irq = pdev->irq;
7043         dev->base_addr = (unsigned long) sp->bar0;
7044
7045         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7046         for (j = 0; j < MAX_TX_FIFOS; j++) {
7047                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7048                     (sp->bar1 + (j * 0x00020000));
7049         }
7050
7051         /*  Driver entry points */
7052         dev->open = &s2io_open;
7053         dev->stop = &s2io_close;
7054         dev->hard_start_xmit = &s2io_xmit;
7055         dev->get_stats = &s2io_get_stats;
7056         dev->set_multicast_list = &s2io_set_multicast;
7057         dev->do_ioctl = &s2io_ioctl;
7058         dev->change_mtu = &s2io_change_mtu;
7059         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7060         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7061         dev->vlan_rx_register = s2io_vlan_rx_register;
7062         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7063
7064         /*
7065          * will use eth_mac_addr() for  dev->set_mac_address
7066          * mac address will be set every time dev->open() is called
7067          */
7068         dev->poll = s2io_poll;
7069         dev->weight = 32;
7070
7071 #ifdef CONFIG_NET_POLL_CONTROLLER
7072         dev->poll_controller = s2io_netpoll;
7073 #endif
7074
7075         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7076         if (sp->high_dma_flag == TRUE)
7077                 dev->features |= NETIF_F_HIGHDMA;
7078         dev->features |= NETIF_F_TSO;
7079         dev->features |= NETIF_F_TSO6;
7080         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7081                 dev->features |= NETIF_F_UFO;
7082                 dev->features |= NETIF_F_HW_CSUM;
7083         }
7084
7085         dev->tx_timeout = &s2io_tx_watchdog;
7086         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7087         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7088         INIT_WORK(&sp->set_link_task, s2io_set_link);
7089
7090         pci_save_state(sp->pdev);
7091
7092         /* Setting swapper control on the NIC, for proper reset operation */
7093         if (s2io_set_swapper(sp)) {
7094                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7095                           dev->name);
7096                 ret = -EAGAIN;
7097                 goto set_swap_failed;
7098         }
7099
7100         /* Verify if the Herc works on the slot its placed into */
7101         if (sp->device_type & XFRAME_II_DEVICE) {
7102                 mode = s2io_verify_pci_mode(sp);
7103                 if (mode < 0) {
7104                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7105                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7106                         ret = -EBADSLT;
7107                         goto set_swap_failed;
7108                 }
7109         }
7110
7111         /* Not needed for Herc */
7112         if (sp->device_type & XFRAME_I_DEVICE) {
7113                 /*
7114                  * Fix for all "FFs" MAC address problems observed on
7115                  * Alpha platforms
7116                  */
7117                 fix_mac_address(sp);
7118                 s2io_reset(sp);
7119         }
7120
7121         /*
7122          * MAC address initialization.
7123          * For now only one mac address will be read and used.
7124          */
7125         bar0 = sp->bar0;
7126         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7127             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7128         writeq(val64, &bar0->rmac_addr_cmd_mem);
7129         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7130                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7131         tmp64 = readq(&bar0->rmac_addr_data0_mem);
7132         mac_down = (u32) tmp64;
7133         mac_up = (u32) (tmp64 >> 32);
7134
7135         memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
7136
7137         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7138         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7139         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7140         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7141         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7142         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7143
7144         /*  Set the factory defined MAC address initially   */
7145         dev->addr_len = ETH_ALEN;
7146         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7147
7148         /* reset Nic and bring it to known state */
7149         s2io_reset(sp);
7150
7151         /*
7152          * Initialize the tasklet status and link state flags
7153          * and the card state parameter
7154          */
7155         atomic_set(&(sp->card_state), 0);
7156         sp->tasklet_status = 0;
7157         sp->link_state = 0;
7158
7159         /* Initialize spinlocks */
7160         spin_lock_init(&sp->tx_lock);
7161
7162         if (!napi)
7163                 spin_lock_init(&sp->put_lock);
7164         spin_lock_init(&sp->rx_lock);
7165
7166         /*
7167          * SXE-002: Configure link and activity LED to init state
7168          * on driver load.
7169          */
7170         subid = sp->pdev->subsystem_device;
7171         if ((subid & 0xFF) >= 0x07) {
7172                 val64 = readq(&bar0->gpio_control);
7173                 val64 |= 0x0000800000000000ULL;
7174                 writeq(val64, &bar0->gpio_control);
7175                 val64 = 0x0411040400000000ULL;
7176                 writeq(val64, (void __iomem *) bar0 + 0x2700);
7177                 val64 = readq(&bar0->gpio_control);
7178         }
7179
7180         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
7181
7182         if (register_netdev(dev)) {
7183                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7184                 ret = -ENODEV;
7185                 goto register_failed;
7186         }
7187         s2io_vpd_read(sp);
7188         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7189         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7190                   sp->product_name, get_xena_rev_id(sp->pdev));
7191         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7192                   s2io_driver_version);
7193         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7194                           "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7195                           sp->def_mac_addr[0].mac_addr[0],
7196                           sp->def_mac_addr[0].mac_addr[1],
7197                           sp->def_mac_addr[0].mac_addr[2],
7198                           sp->def_mac_addr[0].mac_addr[3],
7199                           sp->def_mac_addr[0].mac_addr[4],
7200                           sp->def_mac_addr[0].mac_addr[5]);
7201         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7202         if (sp->device_type & XFRAME_II_DEVICE) {
7203                 mode = s2io_print_pci_mode(sp);
7204                 if (mode < 0) {
7205                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7206                         ret = -EBADSLT;
7207                         unregister_netdev(dev);
7208                         goto set_swap_failed;
7209                 }
7210         }
7211         switch(sp->rxd_mode) {
7212                 case RXD_MODE_1:
7213                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7214                                                 dev->name);
7215                     break;
7216                 case RXD_MODE_3B:
7217                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7218                                                 dev->name);
7219                     break;
7220                 case RXD_MODE_3A:
7221                     DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7222                                                 dev->name);
7223                     break;
7224         }
7225
7226         if (napi)
7227                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7228         switch(sp->intr_type) {
7229                 case INTA:
7230                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7231                     break;
7232                 case MSI:
7233                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7234                     break;
7235                 case MSI_X:
7236                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7237                     break;
7238         }
7239         if (sp->lro)
7240                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7241                           dev->name);
7242         if (ufo)
7243                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7244                                         " enabled\n", dev->name);
7245         /* Initialize device name */
7246         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7247
7248         /* Initialize bimodal Interrupts */
7249         sp->config.bimodal = bimodal;
7250         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7251                 sp->config.bimodal = 0;
7252                 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7253                         dev->name);
7254         }
7255
7256         /*
7257          * Make Link state as off at this point, when the Link change
7258          * interrupt comes the state will be automatically changed to
7259          * the right state.
7260          */
7261         netif_carrier_off(dev);
7262
7263         return 0;
7264
7265       register_failed:
7266       set_swap_failed:
7267         iounmap(sp->bar1);
7268       bar1_remap_failed:
7269         iounmap(sp->bar0);
7270       bar0_remap_failed:
7271       mem_alloc_failed:
7272         free_shared_mem(sp);
7273         pci_disable_device(pdev);
7274         if (dev_intr_type != MSI_X)
7275                 pci_release_regions(pdev);
7276         else {
7277                 release_mem_region(pci_resource_start(pdev, 0),
7278                         pci_resource_len(pdev, 0));
7279                 release_mem_region(pci_resource_start(pdev, 2),
7280                         pci_resource_len(pdev, 2));
7281         }
7282         pci_set_drvdata(pdev, NULL);
7283         free_netdev(dev);
7284
7285         return ret;
7286 }
7287
7288 /**
7289  * s2io_rem_nic - Free the PCI device
7290  * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
7293  * be in response to a Hot plug event or when the driver is to be removed
7294  * from memory.
7295  */
7296
7297 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7298 {
7299         struct net_device *dev =
7300             (struct net_device *) pci_get_drvdata(pdev);
7301         struct s2io_nic *sp;
7302
7303         if (dev == NULL) {
7304                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7305                 return;
7306         }
7307
7308         flush_scheduled_work();
7309
7310         sp = dev->priv;
7311         unregister_netdev(dev);
7312
7313         free_shared_mem(sp);
7314         iounmap(sp->bar0);
7315         iounmap(sp->bar1);
7316         if (sp->intr_type != MSI_X)
7317                 pci_release_regions(pdev);
7318         else {
7319                 release_mem_region(pci_resource_start(pdev, 0),
7320                         pci_resource_len(pdev, 0));
7321                 release_mem_region(pci_resource_start(pdev, 2),
7322                         pci_resource_len(pdev, 2));
7323         }
7324         pci_set_drvdata(pdev, NULL);
7325         free_netdev(dev);
7326         pci_disable_device(pdev);
7327 }
7328
7329 /**
7330  * s2io_starter - Entry point for the driver
7331  * Description: This function is the entry point for the driver. It verifies
7332  * the module loadable parameters and initializes PCI configuration space.
7333  */
7334
7335 int __init s2io_starter(void)
7336 {
7337         return pci_register_driver(&s2io_driver);
7338 }
7339
7340 /**
7341  * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
7343  */
7344
static __exit void s2io_closer(void)
{
        /* Unregistering triggers s2io_rem_nic() for every bound device. */
        pci_unregister_driver(&s2io_driver);
        DBG_PRINT(INIT_DBG, "cleanup done\n");
}
7350
/* Module load/unload hooks: register/unregister the PCI driver. */
module_init(s2io_starter);
module_exit(s2io_closer);
7353
7354 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7355                 struct tcphdr **tcp, struct RxD_t *rxdp)
7356 {
7357         int ip_off;
7358         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7359
7360         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7361                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7362                           __FUNCTION__);
7363                 return -1;
7364         }
7365
7366         /* TODO:
7367          * By default the VLAN field in the MAC is stripped by the card, if this
7368          * feature is turned off in rx_pa_cfg register, then the ip_off field
7369          * has to be shifted by a further 2 bytes
7370          */
7371         switch (l2_type) {
7372                 case 0: /* DIX type */
7373                 case 4: /* DIX type with VLAN */
7374                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7375                         break;
7376                 /* LLC, SNAP etc are considered non-mergeable */
7377                 default:
7378                         return -1;
7379         }
7380
7381         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7382         ip_len = (u8)((*ip)->ihl);
7383         ip_len <<= 2;
7384         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7385
7386         return 0;
7387 }
7388
7389 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7390                                   struct tcphdr *tcp)
7391 {
7392         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7393         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7394            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7395                 return -1;
7396         return 0;
7397 }
7398
7399 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7400 {
7401         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7402 }
7403
7404 static void initiate_new_session(struct lro *lro, u8 *l2h,
7405                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7406 {
7407         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7408         lro->l2h = l2h;
7409         lro->iph = ip;
7410         lro->tcph = tcp;
7411         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7412         lro->tcp_ack = ntohl(tcp->ack_seq);
7413         lro->sg_num = 1;
7414         lro->total_len = ntohs(ip->tot_len);
7415         lro->frags_len = 0;
7416         /*
7417          * check if we saw TCP timestamp. Other consistency checks have
7418          * already been done.
7419          */
7420         if (tcp->doff == 8) {
7421                 u32 *ptr;
7422                 ptr = (u32 *)(tcp+1);
7423                 lro->saw_ts = 1;
7424                 lro->cur_tsval = *(ptr+1);
7425                 lro->cur_tsecr = *(ptr+2);
7426         }
7427         lro->in_use = 1;
7428 }
7429
/*
 * Rewrite the parent packet's IP/TCP headers just before an LRO session
 * is flushed: patch in the aggregated length, recompute the IP checksum,
 * and propagate the latest ack/window/tsecr seen for the session.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
        struct iphdr *ip = lro->iph;
        struct tcphdr *tcp = lro->tcph;
        __sum16 nchk;
        struct stat_block *statinfo = sp->mac_control.stats_info;
        DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

        /* Update L3 header */
        ip->tot_len = htons(lro->total_len);
        /* Zero the checksum field first: it is included in the sum. */
        ip->check = 0;
        nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
        ip->check = nchk;

        /* Update L4 header */
        /* NOTE(review): tcp_ack/window are copied back with no byte-order
         * conversion, so the session-tracking helpers must cache them in
         * network order -- verify they all agree on little-endian hosts. */
        tcp->ack_seq = lro->tcp_ack;
        tcp->window = lro->window;

        /* Update tsecr field if this session has timestamps enabled */
        if (lro->saw_ts) {
                u32 *ptr = (u32 *)(tcp + 1);
                /* tsecr lives in the third 32-bit word after the TCP header
                 * (assumes the NOP,NOP,TIMESTAMP option layout). */
                *(ptr+2) = lro->cur_tsecr;
        }

        /* Update counters required for calculation of
         * average no. of packets aggregated.
         */
        statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
        statinfo->sw_stat.num_aggregations++;
}
7460
7461 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7462                 struct tcphdr *tcp, u32 l4_pyld)
7463 {
7464         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7465         lro->total_len += l4_pyld;
7466         lro->frags_len += l4_pyld;
7467         lro->tcp_next_seq += l4_pyld;
7468         lro->sg_num++;
7469
7470         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7471         lro->tcp_ack = tcp->ack_seq;
7472         lro->window = tcp->window;
7473
7474         if (lro->saw_ts) {
7475                 u32 *ptr;
7476                 /* Update tsecr and tsval from this packet */
7477                 ptr = (u32 *) (tcp + 1);
7478                 lro->cur_tsval = *(ptr + 1);
7479                 lro->cur_tsecr = *(ptr + 2);
7480         }
7481 }
7482
7483 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7484                                     struct tcphdr *tcp, u32 tcp_pyld_len)
7485 {
7486         u8 *ptr;
7487
7488         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7489
7490         if (!tcp_pyld_len) {
7491                 /* Runt frame or a pure ack */
7492                 return -1;
7493         }
7494
7495         if (ip->ihl != 5) /* IP has options */
7496                 return -1;
7497
7498         /* If we see CE codepoint in IP header, packet is not mergeable */
7499         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7500                 return -1;
7501
7502         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7503         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7504                                     tcp->ece || tcp->cwr || !tcp->ack) {
7505                 /*
7506                  * Currently recognize only the ack control word and
7507                  * any other control field being set would result in
7508                  * flushing the LRO session
7509                  */
7510                 return -1;
7511         }
7512
7513         /*
7514          * Allow only one TCP timestamp option. Don't aggregate if
7515          * any other options are detected.
7516          */
7517         if (tcp->doff != 5 && tcp->doff != 8)
7518                 return -1;
7519
7520         if (tcp->doff == 8) {
7521                 ptr = (u8 *)(tcp + 1);
7522                 while (*ptr == TCPOPT_NOP)
7523                         ptr++;
7524                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7525                         return -1;
7526
7527                 /* Ensure timestamp value increases monotonically */
7528                 if (l_lro)
7529                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7530                                 return -1;
7531
7532                 /* timestamp echo reply should be non-zero */
7533                 if (*((u32 *)(ptr+6)) == 0)
7534                         return -1;
7535         }
7536
7537         return 0;
7538 }
7539
/*
 * Classify an incoming TCP segment against the per-NIC table of LRO
 * sessions and decide what the receive path should do with it.
 *
 * Return codes consumed by the caller:
 *   0 - all session slots in use; *lro is NULL, send the packet up as-is
 *   1 - segment was aggregated into an existing session
 *   2 - out-of-order or non-mergeable segment; matched session's headers
 *       were finalized so the caller can flush it
 *   3 - a new session was begun with this segment
 *   4 - aggregated AND session hit lro_max_aggr_per_sess; flush it
 *   5 - packet is not L3/L4 aggregatable; send it up, no session created
 *  <0 - frame rejected at L2 (propagated from check_L2_lro_capable)
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
                      struct RxD_t *rxdp, struct s2io_nic *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        int ret = 0, i;

        /* Locate the IP and TCP headers; bail out for non-LRO-able frames. */
        if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                         rxdp))) {
                DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
                          ip->saddr, ip->daddr);
        } else {
                return ret;
        }

        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        /* First pass: look for an in-use session matching this 4-tuple. */
        for (i=0; i<MAX_LRO_SESSIONS; i++) {
                struct lro *l_lro = &sp->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        /* A sequence gap means we cannot merge; count it
                         * and flush the session (ret = 2). */
                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
                                          "0x%x, actual 0x%x\n", __FUNCTION__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                sp->mac_control.stats_info->
                                   sw_stat.outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
                        return 5;
                }

                /* Second pass: claim the first free session slot. */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *l_lro = &sp->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
                          __FUNCTION__);
                *lro = NULL;
                return ret;
        }

        /* Act on the verdict reached above. */
        switch (ret) {
                case 3:
                        initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
                        break;
                case 2:
                        /* Finalize headers so the caller can flush. */
                        update_L3L4_header(sp, *lro);
                        break;
                case 1:
                        aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                        if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                                update_L3L4_header(sp, *lro);
                                ret = 4; /* Flush the LRO */
                        }
                        break;
                default:
                        DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
                                __FUNCTION__);
                        break;
        }

        return ret;
}
7635
7636 static void clear_lro_session(struct lro *lro)
7637 {
7638         static u16 lro_struct_size = sizeof(struct lro);
7639
7640         memset(lro, 0, lro_struct_size);
7641 }
7642
7643 static void queue_rx_frame(struct sk_buff *skb)
7644 {
7645         struct net_device *dev = skb->dev;
7646
7647         skb->protocol = eth_type_trans(skb, dev);
7648         if (napi)
7649                 netif_receive_skb(skb);
7650         else
7651                 netif_rx(skb);
7652 }
7653
7654 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7655                            struct sk_buff *skb,
7656                            u32 tcp_len)
7657 {
7658         struct sk_buff *first = lro->parent;
7659
7660         first->len += tcp_len;
7661         first->data_len = lro->frags_len;
7662         skb_pull(skb, (skb->len - tcp_len));
7663         if (skb_shinfo(first)->frag_list)
7664                 lro->last_frag->next = skb;
7665         else
7666                 skb_shinfo(first)->frag_list = skb;
7667         first->truesize += skb->truesize;
7668         lro->last_frag = skb;
7669         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7670         return;
7671 }