]> pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/net/s2io.c
netdev: use ARRAY_SIZE() instead of sizeof(array) / ETH_GSTRING_LEN
[linux-2.6-omap-h63xx.git] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regaring the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explaination of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
/* Driver version string reported via ethtool and log messages. */
#define DRV_VERSION "2.0.26.17"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/*
 * Per-descriptor size in bytes and descriptors per Rx block, both
 * indexed by the adapter's rxd mode (see uses of nic->rxd_mode below).
 */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Evaluates to 1 only for XFRAME_I devices within those two ID ranges.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (dev_type == XFRAME_I_DEVICE) ?                 \
                ((((subid >= 0x600B) && (subid <= 0x600D)) || \
                 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is up when neither remote nor local RMAC fault is reported. */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet; non-zero means it was already in use.
 * NOTE(review): expects a variable named 'sp' in the expansion scope. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Return codes of rx_buffer_level(): refill urgency indicators. */
#define PANIC   1
#define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
133 static inline int is_s2io_card_up(const struct s2io_nic * sp)
134 {
135         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
136 }
137
/* Ethtool related variables and Macros. */
/* Names of the ethtool self-test cases (ETH_SS_TEST strings). */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
146
/* Ethtool statistic names common to Xframe I and II hardware counters. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};
243
/* Additional hardware statistic names available only on Xframe II. */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};
262
/* Software (driver-maintained) statistic names exposed via ethtool. */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
        {"\n DRIVER STATISTICS"},
        {"single_bit_ecc_errs"},
        {"double_bit_ecc_errs"},
        {"parity_err_cnt"},
        {"serious_err_cnt"},
        {"soft_reset_cnt"},
        {"fifo_full_cnt"},
        {"ring_0_full_cnt"},
        {"ring_1_full_cnt"},
        {"ring_2_full_cnt"},
        {"ring_3_full_cnt"},
        {"ring_4_full_cnt"},
        {"ring_5_full_cnt"},
        {"ring_6_full_cnt"},
        {"ring_7_full_cnt"},
        {"alarm_transceiver_temp_high"},
        {"alarm_transceiver_temp_low"},
        {"alarm_laser_bias_current_high"},
        {"alarm_laser_bias_current_low"},
        {"alarm_laser_output_power_high"},
        {"alarm_laser_output_power_low"},
        {"warn_transceiver_temp_high"},
        {"warn_transceiver_temp_low"},
        {"warn_laser_bias_current_high"},
        {"warn_laser_bias_current_low"},
        {"warn_laser_output_power_high"},
        {"warn_laser_output_power_low"},
        {"lro_aggregated_pkts"},
        {"lro_flush_both_count"},
        {"lro_out_of_sequence_pkts"},
        {"lro_flush_due_to_max_pkts"},
        {"lro_avg_aggr_pkts"},
        {"mem_alloc_fail_cnt"},
        {"pci_map_fail_cnt"},
        {"watchdog_timer_cnt"},
        {"mem_allocated"},
        {"mem_freed"},
        {"link_up_cnt"},
        {"link_down_cnt"},
        {"link_up_time"},
        {"link_down_time"},
        {"tx_tcode_buf_abort_cnt"},
        {"tx_tcode_desc_abort_cnt"},
        {"tx_tcode_parity_err_cnt"},
        {"tx_tcode_link_loss_cnt"},
        {"tx_tcode_list_proc_err_cnt"},
        {"rx_tcode_parity_err_cnt"},
        {"rx_tcode_abort_cnt"},
        {"rx_tcode_parity_abort_cnt"},
        {"rx_tcode_rda_fail_cnt"},
        {"rx_tcode_unkn_prot_cnt"},
        {"rx_tcode_fcs_err_cnt"},
        {"rx_tcode_buf_size_err_cnt"},
        {"rx_tcode_rxd_corrupt_cnt"},
        {"rx_tcode_unkn_err_cnt"},
        {"tda_err_cnt"},
        {"pfc_err_cnt"},
        {"pcc_err_cnt"},
        {"tti_err_cnt"},
        {"tpa_err_cnt"},
        {"sm_err_cnt"},
        {"lso_err_cnt"},
        {"mac_tmac_err_cnt"},
        {"mac_rmac_err_cnt"},
        {"xgxs_txgxs_err_cnt"},
        {"xgxs_rxgxs_err_cnt"},
        {"rc_err_cnt"},
        {"prc_pcix_err_cnt"},
        {"rpa_err_cnt"},
        {"rda_err_cnt"},
        {"rti_err_cnt"},
        {"mc_err_cnt"}
};
337
/* Entry counts of the ethtool stats key tables above. */
#define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)

/* Xframe I exposes the common + driver stats; Xframe II adds enhanced. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
/* Parenthesized so the expansion remains a single term in any context
 * (the previous unparenthesized form misgrouped under division etc.). */
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
350
/*
 * Initialize and arm a kernel timer in one step: set its handler and
 * callback argument, then schedule it to fire 'exp' jiffies from now.
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe inside un-braced if/else bodies; the caller's
 * trailing semicolon terminates it as before.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&timer);                             \
                timer.function = handle;                        \
                timer.data = (unsigned long) arg;               \
                mod_timer(&timer, (jiffies + exp));             \
        } while (0)
356
357 /* copy mac addr to def_mac_addr array */
358 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
359 {
360         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
361         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
362         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
363         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
364         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
365         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
366 }
367 /* Add the vlan */
368 static void s2io_vlan_rx_register(struct net_device *dev,
369                                         struct vlan_group *grp)
370 {
371         struct s2io_nic *nic = dev->priv;
372         unsigned long flags;
373
374         spin_lock_irqsave(&nic->tx_lock, flags);
375         nic->vlgrp = grp;
376         spin_unlock_irqrestore(&nic->tx_lock, flags);
377 }
378
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
static int vlan_strip_flag;

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.  Each table is an address/data command stream terminated
 * by END_SIGN.
 */

#define END_SIGN        0x0
/* XAUI configuration stream for Xframe II (Hercules) adapters. */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};

/* XAUI configuration stream for Xframe I (Xena) adapters. */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
446
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts; FIFO 0 gets a larger default. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring max frame lengths; 0 means use the device default. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
493
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error recovery callbacks (Advanced Error Reporting support). */
static struct pci_error_handlers s2io_err_handler = {
        .error_detected = s2io_io_error_detected,
        .slot_reset = s2io_io_slot_reset,
        .resume = s2io_io_resume,
};
517
/* PCI driver registration structure tying probe/remove to the table. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
525
/* A simplifier macro used both by init and free shared_mem Fns():
 * pages needed to hold 'len' items at 'per_each' items per page
 * (integer ceiling division).  Arguments are fully parenthesized so
 * expression arguments (e.g. a sum or product) group correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
528
529 /**
530  * init_shared_mem - Allocation and Initialization of Memory
531  * @nic: Device private variable.
532  * Description: The function allocates all the memory areas shared
533  * between the NIC and the driver. This includes Tx descriptors,
534  * Rx descriptors and the statistics block.
535  */
536
537 static int init_shared_mem(struct s2io_nic *nic)
538 {
539         u32 size;
540         void *tmp_v_addr, *tmp_v_addr_next;
541         dma_addr_t tmp_p_addr, tmp_p_addr_next;
542         struct RxD_block *pre_rxd_blk = NULL;
543         int i, j, blk_cnt;
544         int lst_size, lst_per_page;
545         struct net_device *dev = nic->dev;
546         unsigned long tmp;
547         struct buffAdd *ba;
548
549         struct mac_info *mac_control;
550         struct config_param *config;
551         unsigned long long mem_allocated = 0;
552
553         mac_control = &nic->mac_control;
554         config = &nic->config;
555
556
557         /* Allocation and initialization of TXDLs in FIOFs */
558         size = 0;
559         for (i = 0; i < config->tx_fifo_num; i++) {
560                 size += config->tx_cfg[i].fifo_len;
561         }
562         if (size > MAX_AVAILABLE_TXDS) {
563                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
564                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
565                 return -EINVAL;
566         }
567
568         lst_size = (sizeof(struct TxD) * config->max_txds);
569         lst_per_page = PAGE_SIZE / lst_size;
570
571         for (i = 0; i < config->tx_fifo_num; i++) {
572                 int fifo_len = config->tx_cfg[i].fifo_len;
573                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
574                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
575                                                           GFP_KERNEL);
576                 if (!mac_control->fifos[i].list_info) {
577                         DBG_PRINT(INFO_DBG,
578                                   "Malloc failed for list_info\n");
579                         return -ENOMEM;
580                 }
581                 mem_allocated += list_holder_size;
582         }
583         for (i = 0; i < config->tx_fifo_num; i++) {
584                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
585                                                 lst_per_page);
586                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
587                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
588                     config->tx_cfg[i].fifo_len - 1;
589                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
590                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
591                     config->tx_cfg[i].fifo_len - 1;
592                 mac_control->fifos[i].fifo_no = i;
593                 mac_control->fifos[i].nic = nic;
594                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
595
596                 for (j = 0; j < page_num; j++) {
597                         int k = 0;
598                         dma_addr_t tmp_p;
599                         void *tmp_v;
600                         tmp_v = pci_alloc_consistent(nic->pdev,
601                                                      PAGE_SIZE, &tmp_p);
602                         if (!tmp_v) {
603                                 DBG_PRINT(INFO_DBG,
604                                           "pci_alloc_consistent ");
605                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
606                                 return -ENOMEM;
607                         }
608                         /* If we got a zero DMA address(can happen on
609                          * certain platforms like PPC), reallocate.
610                          * Store virtual address of page we don't want,
611                          * to be freed later.
612                          */
613                         if (!tmp_p) {
614                                 mac_control->zerodma_virt_addr = tmp_v;
615                                 DBG_PRINT(INIT_DBG,
616                                 "%s: Zero DMA address for TxDL. ", dev->name);
617                                 DBG_PRINT(INIT_DBG,
618                                 "Virtual address %p\n", tmp_v);
619                                 tmp_v = pci_alloc_consistent(nic->pdev,
620                                                      PAGE_SIZE, &tmp_p);
621                                 if (!tmp_v) {
622                                         DBG_PRINT(INFO_DBG,
623                                           "pci_alloc_consistent ");
624                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
625                                         return -ENOMEM;
626                                 }
627                                 mem_allocated += PAGE_SIZE;
628                         }
629                         while (k < lst_per_page) {
630                                 int l = (j * lst_per_page) + k;
631                                 if (l == config->tx_cfg[i].fifo_len)
632                                         break;
633                                 mac_control->fifos[i].list_info[l].list_virt_addr =
634                                     tmp_v + (k * lst_size);
635                                 mac_control->fifos[i].list_info[l].list_phy_addr =
636                                     tmp_p + (k * lst_size);
637                                 k++;
638                         }
639                 }
640         }
641
642         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
643         if (!nic->ufo_in_band_v)
644                 return -ENOMEM;
645          mem_allocated += (size * sizeof(u64));
646
647         /* Allocation and initialization of RXDs in Rings */
648         size = 0;
649         for (i = 0; i < config->rx_ring_num; i++) {
650                 if (config->rx_cfg[i].num_rxd %
651                     (rxd_count[nic->rxd_mode] + 1)) {
652                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
653                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
654                                   i);
655                         DBG_PRINT(ERR_DBG, "RxDs per Block");
656                         return FAILURE;
657                 }
658                 size += config->rx_cfg[i].num_rxd;
659                 mac_control->rings[i].block_count =
660                         config->rx_cfg[i].num_rxd /
661                         (rxd_count[nic->rxd_mode] + 1 );
662                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
663                         mac_control->rings[i].block_count;
664         }
665         if (nic->rxd_mode == RXD_MODE_1)
666                 size = (size * (sizeof(struct RxD1)));
667         else
668                 size = (size * (sizeof(struct RxD3)));
669
670         for (i = 0; i < config->rx_ring_num; i++) {
671                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
672                 mac_control->rings[i].rx_curr_get_info.offset = 0;
673                 mac_control->rings[i].rx_curr_get_info.ring_len =
674                     config->rx_cfg[i].num_rxd - 1;
675                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
676                 mac_control->rings[i].rx_curr_put_info.offset = 0;
677                 mac_control->rings[i].rx_curr_put_info.ring_len =
678                     config->rx_cfg[i].num_rxd - 1;
679                 mac_control->rings[i].nic = nic;
680                 mac_control->rings[i].ring_no = i;
681
682                 blk_cnt = config->rx_cfg[i].num_rxd /
683                                 (rxd_count[nic->rxd_mode] + 1);
684                 /*  Allocating all the Rx blocks */
685                 for (j = 0; j < blk_cnt; j++) {
686                         struct rx_block_info *rx_blocks;
687                         int l;
688
689                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
690                         size = SIZE_OF_BLOCK; //size is always page size
691                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
692                                                           &tmp_p_addr);
693                         if (tmp_v_addr == NULL) {
694                                 /*
695                                  * In case of failure, free_shared_mem()
696                                  * is called, which should free any
697                                  * memory that was alloced till the
698                                  * failure happened.
699                                  */
700                                 rx_blocks->block_virt_addr = tmp_v_addr;
701                                 return -ENOMEM;
702                         }
703                         mem_allocated += size;
704                         memset(tmp_v_addr, 0, size);
705                         rx_blocks->block_virt_addr = tmp_v_addr;
706                         rx_blocks->block_dma_addr = tmp_p_addr;
707                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
708                                                   rxd_count[nic->rxd_mode],
709                                                   GFP_KERNEL);
710                         if (!rx_blocks->rxds)
711                                 return -ENOMEM;
712                         mem_allocated +=
713                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
714                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
715                                 rx_blocks->rxds[l].virt_addr =
716                                         rx_blocks->block_virt_addr +
717                                         (rxd_size[nic->rxd_mode] * l);
718                                 rx_blocks->rxds[l].dma_addr =
719                                         rx_blocks->block_dma_addr +
720                                         (rxd_size[nic->rxd_mode] * l);
721                         }
722                 }
723                 /* Interlinking all Rx Blocks */
724                 for (j = 0; j < blk_cnt; j++) {
725                         tmp_v_addr =
726                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
727                         tmp_v_addr_next =
728                                 mac_control->rings[i].rx_blocks[(j + 1) %
729                                               blk_cnt].block_virt_addr;
730                         tmp_p_addr =
731                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
732                         tmp_p_addr_next =
733                                 mac_control->rings[i].rx_blocks[(j + 1) %
734                                               blk_cnt].block_dma_addr;
735
736                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
737                         pre_rxd_blk->reserved_2_pNext_RxD_block =
738                             (unsigned long) tmp_v_addr_next;
739                         pre_rxd_blk->pNext_RxD_Blk_physical =
740                             (u64) tmp_p_addr_next;
741                 }
742         }
743         if (nic->rxd_mode == RXD_MODE_3B) {
744                 /*
745                  * Allocation of Storages for buffer addresses in 2BUFF mode
746                  * and the buffers as well.
747                  */
748                 for (i = 0; i < config->rx_ring_num; i++) {
749                         blk_cnt = config->rx_cfg[i].num_rxd /
750                            (rxd_count[nic->rxd_mode]+ 1);
751                         mac_control->rings[i].ba =
752                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
753                                      GFP_KERNEL);
754                         if (!mac_control->rings[i].ba)
755                                 return -ENOMEM;
756                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
757                         for (j = 0; j < blk_cnt; j++) {
758                                 int k = 0;
759                                 mac_control->rings[i].ba[j] =
760                                         kmalloc((sizeof(struct buffAdd) *
761                                                 (rxd_count[nic->rxd_mode] + 1)),
762                                                 GFP_KERNEL);
763                                 if (!mac_control->rings[i].ba[j])
764                                         return -ENOMEM;
765                                 mem_allocated += (sizeof(struct buffAdd) *  \
766                                         (rxd_count[nic->rxd_mode] + 1));
767                                 while (k != rxd_count[nic->rxd_mode]) {
768                                         ba = &mac_control->rings[i].ba[j][k];
769
770                                         ba->ba_0_org = (void *) kmalloc
771                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
772                                         if (!ba->ba_0_org)
773                                                 return -ENOMEM;
774                                         mem_allocated +=
775                                                 (BUF0_LEN + ALIGN_SIZE);
776                                         tmp = (unsigned long)ba->ba_0_org;
777                                         tmp += ALIGN_SIZE;
778                                         tmp &= ~((unsigned long) ALIGN_SIZE);
779                                         ba->ba_0 = (void *) tmp;
780
781                                         ba->ba_1_org = (void *) kmalloc
782                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
783                                         if (!ba->ba_1_org)
784                                                 return -ENOMEM;
785                                         mem_allocated
786                                                 += (BUF1_LEN + ALIGN_SIZE);
787                                         tmp = (unsigned long) ba->ba_1_org;
788                                         tmp += ALIGN_SIZE;
789                                         tmp &= ~((unsigned long) ALIGN_SIZE);
790                                         ba->ba_1 = (void *) tmp;
791                                         k++;
792                                 }
793                         }
794                 }
795         }
796
797         /* Allocation and initialization of Statistics block */
798         size = sizeof(struct stat_block);
799         mac_control->stats_mem = pci_alloc_consistent
800             (nic->pdev, size, &mac_control->stats_mem_phy);
801
802         if (!mac_control->stats_mem) {
803                 /*
804                  * In case of failure, free_shared_mem() is called, which
805                  * should free any memory that was alloced till the
806                  * failure happened.
807                  */
808                 return -ENOMEM;
809         }
810         mem_allocated += size;
811         mac_control->stats_mem_sz = size;
812
813         tmp_v_addr = mac_control->stats_mem;
814         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
815         memset(tmp_v_addr, 0, size);
816         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
817                   (unsigned long long) tmp_p_addr);
818         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
819         return SUCCESS;
820 }
821
822 /**
823  * free_shared_mem - Free the allocated Memory
824  * @nic:  Device private variable.
825  * Description: This function is to free all memory locations allocated by
826  * the init_shared_mem() function and return it to the kernel.
827  */
828
829 static void free_shared_mem(struct s2io_nic *nic)
830 {
831         int i, j, blk_cnt, size;
832         u32 ufo_size = 0;
833         void *tmp_v_addr;
834         dma_addr_t tmp_p_addr;
835         struct mac_info *mac_control;
836         struct config_param *config;
837         int lst_size, lst_per_page;
838         struct net_device *dev;
839         int page_num = 0;
840
841         if (!nic)
842                 return;
843
844         dev = nic->dev;
845
846         mac_control = &nic->mac_control;
847         config = &nic->config;
848
849         lst_size = (sizeof(struct TxD) * config->max_txds);
850         lst_per_page = PAGE_SIZE / lst_size;
851
852         for (i = 0; i < config->tx_fifo_num; i++) {
853                 ufo_size += config->tx_cfg[i].fifo_len;
854                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
855                                                         lst_per_page);
856                 for (j = 0; j < page_num; j++) {
857                         int mem_blks = (j * lst_per_page);
858                         if (!mac_control->fifos[i].list_info)
859                                 return;
860                         if (!mac_control->fifos[i].list_info[mem_blks].
861                                  list_virt_addr)
862                                 break;
863                         pci_free_consistent(nic->pdev, PAGE_SIZE,
864                                             mac_control->fifos[i].
865                                             list_info[mem_blks].
866                                             list_virt_addr,
867                                             mac_control->fifos[i].
868                                             list_info[mem_blks].
869                                             list_phy_addr);
870                         nic->mac_control.stats_info->sw_stat.mem_freed
871                                                 += PAGE_SIZE;
872                 }
873                 /* If we got a zero DMA address during allocation,
874                  * free the page now
875                  */
876                 if (mac_control->zerodma_virt_addr) {
877                         pci_free_consistent(nic->pdev, PAGE_SIZE,
878                                             mac_control->zerodma_virt_addr,
879                                             (dma_addr_t)0);
880                         DBG_PRINT(INIT_DBG,
881                                 "%s: Freeing TxDL with zero DMA addr. ",
882                                 dev->name);
883                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
884                                 mac_control->zerodma_virt_addr);
885                         nic->mac_control.stats_info->sw_stat.mem_freed
886                                                 += PAGE_SIZE;
887                 }
888                 kfree(mac_control->fifos[i].list_info);
889                 nic->mac_control.stats_info->sw_stat.mem_freed +=
890                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
891         }
892
893         size = SIZE_OF_BLOCK;
894         for (i = 0; i < config->rx_ring_num; i++) {
895                 blk_cnt = mac_control->rings[i].block_count;
896                 for (j = 0; j < blk_cnt; j++) {
897                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
898                                 block_virt_addr;
899                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
900                                 block_dma_addr;
901                         if (tmp_v_addr == NULL)
902                                 break;
903                         pci_free_consistent(nic->pdev, size,
904                                             tmp_v_addr, tmp_p_addr);
905                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
906                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
907                         nic->mac_control.stats_info->sw_stat.mem_freed +=
908                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
909                 }
910         }
911
912         if (nic->rxd_mode == RXD_MODE_3B) {
913                 /* Freeing buffer storage addresses in 2BUFF mode. */
914                 for (i = 0; i < config->rx_ring_num; i++) {
915                         blk_cnt = config->rx_cfg[i].num_rxd /
916                             (rxd_count[nic->rxd_mode] + 1);
917                         for (j = 0; j < blk_cnt; j++) {
918                                 int k = 0;
919                                 if (!mac_control->rings[i].ba[j])
920                                         continue;
921                                 while (k != rxd_count[nic->rxd_mode]) {
922                                         struct buffAdd *ba =
923                                                 &mac_control->rings[i].ba[j][k];
924                                         kfree(ba->ba_0_org);
925                                         nic->mac_control.stats_info->sw_stat.\
926                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
927                                         kfree(ba->ba_1_org);
928                                         nic->mac_control.stats_info->sw_stat.\
929                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
930                                         k++;
931                                 }
932                                 kfree(mac_control->rings[i].ba[j]);
933                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
934                                         (sizeof(struct buffAdd) *
935                                         (rxd_count[nic->rxd_mode] + 1));
936                         }
937                         kfree(mac_control->rings[i].ba);
938                         nic->mac_control.stats_info->sw_stat.mem_freed +=
939                         (sizeof(struct buffAdd *) * blk_cnt);
940                 }
941         }
942
943         if (mac_control->stats_mem) {
944                 pci_free_consistent(nic->pdev,
945                                     mac_control->stats_mem_sz,
946                                     mac_control->stats_mem,
947                                     mac_control->stats_mem_phy);
948                 nic->mac_control.stats_info->sw_stat.mem_freed +=
949                         mac_control->stats_mem_sz;
950         }
951         if (nic->ufo_in_band_v) {
952                 kfree(nic->ufo_in_band_v);
953                 nic->mac_control.stats_info->sw_stat.mem_freed
954                         += (ufo_size * sizeof(u64));
955         }
956 }
957
958 /**
959  * s2io_verify_pci_mode - Verify that the adapter reports a known PCI bus mode.
960  */
961
962 static int s2io_verify_pci_mode(struct s2io_nic *nic)
963 {
964         struct XENA_dev_config __iomem *bar0 = nic->bar0;
965         register u64 val64 = 0;
966         int     mode;
967
968         val64 = readq(&bar0->pci_mode);
969         mode = (u8)GET_PCI_MODE(val64);
970
971         if ( val64 & PCI_MODE_UNKNOWN_MODE)
972                 return -1;      /* Unknown PCI mode */
973         return mode;
974 }
975
976 #define NEC_VENID   0x1033
977 #define NEC_DEVID   0x0125
978 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
979 {
980         struct pci_dev *tdev = NULL;
981         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
982                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
983                         if (tdev->bus == s2io_pdev->bus->parent)
984                                 pci_dev_put(tdev);
985                                 return 1;
986                 }
987         }
988         return 0;
989 }
990
991 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
992 /**
993  * s2io_print_pci_mode - Decode and log the adapter's PCI/PCI-X bus width and speed.
994  */
995 static int s2io_print_pci_mode(struct s2io_nic *nic)
996 {
997         struct XENA_dev_config __iomem *bar0 = nic->bar0;
998         register u64 val64 = 0;
999         int     mode;
1000         struct config_param *config = &nic->config;
1001
1002         val64 = readq(&bar0->pci_mode);
1003         mode = (u8)GET_PCI_MODE(val64);
1004
1005         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1006                 return -1;      /* Unknown PCI mode */
1007
1008         config->bus_speed = bus_speed[mode];
1009
1010         if (s2io_on_nec_bridge(nic->pdev)) {
1011                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1012                                                         nic->dev->name);
1013                 return mode;
1014         }
1015
1016         if (val64 & PCI_MODE_32_BITS) {
1017                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1018         } else {
1019                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1020         }
1021
1022         switch(mode) {
1023                 case PCI_MODE_PCI_33:
1024                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1025                         break;
1026                 case PCI_MODE_PCI_66:
1027                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1028                         break;
1029                 case PCI_MODE_PCIX_M1_66:
1030                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1031                         break;
1032                 case PCI_MODE_PCIX_M1_100:
1033                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1034                         break;
1035                 case PCI_MODE_PCIX_M1_133:
1036                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1037                         break;
1038                 case PCI_MODE_PCIX_M2_66:
1039                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1040                         break;
1041                 case PCI_MODE_PCIX_M2_100:
1042                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1043                         break;
1044                 case PCI_MODE_PCIX_M2_133:
1045                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1046                         break;
1047                 default:
1048                         return -1;      /* Unsupported bus speed */
1049         }
1050
1051         return mode;
1052 }
1053
1054 /**
1055  *  init_nic - Initialization of hardware
1056  *  @nic: device private variable
1057  *  Description: The function sequentially configures every block
1058  *  of the H/W from their reset values.
1059  *  Return Value:  SUCCESS on success and
1060  *  '-1' on failure (endian settings incorrect).
1061  */
1062
1063 static int init_nic(struct s2io_nic *nic)
1064 {
1065         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1066         struct net_device *dev = nic->dev;
1067         register u64 val64 = 0;
1068         void __iomem *add;
1069         u32 time;
1070         int i, j;
1071         struct mac_info *mac_control;
1072         struct config_param *config;
1073         int dtx_cnt = 0;
1074         unsigned long long mem_share;
1075         int mem_size;
1076
1077         mac_control = &nic->mac_control;
1078         config = &nic->config;
1079
1080         /* to set the swapper control on the card */
1081         if(s2io_set_swapper(nic)) {
1082                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1083                 return -EIO;
1084         }
1085
1086         /*
1087          * Herc requires EOI to be removed from reset before XGXS, so..
1088          */
1089         if (nic->device_type & XFRAME_II_DEVICE) {
1090                 val64 = 0xA500000000ULL;
1091                 writeq(val64, &bar0->sw_reset);
1092                 msleep(500);
1093                 val64 = readq(&bar0->sw_reset);
1094         }
1095
1096         /* Remove XGXS from reset state */
1097         val64 = 0;
1098         writeq(val64, &bar0->sw_reset);
1099         msleep(500);
1100         val64 = readq(&bar0->sw_reset);
1101
1102         /* Ensure that it's safe to access registers by checking
1103          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1104          */
1105         if (nic->device_type == XFRAME_II_DEVICE) {
1106                 for (i = 0; i < 50; i++) {
1107                         val64 = readq(&bar0->adapter_status);
1108                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1109                                 break;
1110                         msleep(10);
1111                 }
1112                 if (i == 50)
1113                         return -ENODEV;
1114         }
1115
1116         /*  Enable Receiving broadcasts */
1117         add = &bar0->mac_cfg;
1118         val64 = readq(&bar0->mac_cfg);
1119         val64 |= MAC_RMAC_BCAST_ENABLE;
1120         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1121         writel((u32) val64, add);
1122         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1123         writel((u32) (val64 >> 32), (add + 4));
1124
1125         /* Read registers in all blocks */
1126         val64 = readq(&bar0->mac_int_mask);
1127         val64 = readq(&bar0->mc_int_mask);
1128         val64 = readq(&bar0->xgxs_int_mask);
1129
1130         /*  Set MTU */
1131         val64 = dev->mtu;
1132         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1133
1134         if (nic->device_type & XFRAME_II_DEVICE) {
1135                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1136                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1137                                           &bar0->dtx_control, UF);
1138                         if (dtx_cnt & 0x1)
1139                                 msleep(1); /* Necessary!! */
1140                         dtx_cnt++;
1141                 }
1142         } else {
1143                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1144                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1145                                           &bar0->dtx_control, UF);
1146                         val64 = readq(&bar0->dtx_control);
1147                         dtx_cnt++;
1148                 }
1149         }
1150
1151         /*  Tx DMA Initialization */
1152         val64 = 0;
1153         writeq(val64, &bar0->tx_fifo_partition_0);
1154         writeq(val64, &bar0->tx_fifo_partition_1);
1155         writeq(val64, &bar0->tx_fifo_partition_2);
1156         writeq(val64, &bar0->tx_fifo_partition_3);
1157
1158
1159         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1160                 val64 |=
1161                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1162                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1163                                     ((i * 32) + 5), 3);
1164
1165                 if (i == (config->tx_fifo_num - 1)) {
1166                         if (i % 2 == 0)
1167                                 i++;
1168                 }
1169
1170                 switch (i) {
1171                 case 1:
1172                         writeq(val64, &bar0->tx_fifo_partition_0);
1173                         val64 = 0;
1174                         break;
1175                 case 3:
1176                         writeq(val64, &bar0->tx_fifo_partition_1);
1177                         val64 = 0;
1178                         break;
1179                 case 5:
1180                         writeq(val64, &bar0->tx_fifo_partition_2);
1181                         val64 = 0;
1182                         break;
1183                 case 7:
1184                         writeq(val64, &bar0->tx_fifo_partition_3);
1185                         break;
1186                 }
1187         }
1188
1189         /*
1190          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1191          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1192          */
1193         if ((nic->device_type == XFRAME_I_DEVICE) &&
1194                 (nic->pdev->revision < 4))
1195                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1196
1197         val64 = readq(&bar0->tx_fifo_partition_0);
1198         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1199                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1200
1201         /*
1202          * Initialization of Tx_PA_CONFIG register to ignore packet
1203          * integrity checking.
1204          */
1205         val64 = readq(&bar0->tx_pa_cfg);
1206         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1207             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1208         writeq(val64, &bar0->tx_pa_cfg);
1209
1210         /* Rx DMA initialization. */
1211         val64 = 0;
1212         for (i = 0; i < config->rx_ring_num; i++) {
1213                 val64 |=
1214                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1215                          3);
1216         }
1217         writeq(val64, &bar0->rx_queue_priority);
1218
1219         /*
1220          * Allocating equal share of memory to all the
1221          * configured Rings.
1222          */
1223         val64 = 0;
1224         if (nic->device_type & XFRAME_II_DEVICE)
1225                 mem_size = 32;
1226         else
1227                 mem_size = 64;
1228
1229         for (i = 0; i < config->rx_ring_num; i++) {
1230                 switch (i) {
1231                 case 0:
1232                         mem_share = (mem_size / config->rx_ring_num +
1233                                      mem_size % config->rx_ring_num);
1234                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1235                         continue;
1236                 case 1:
1237                         mem_share = (mem_size / config->rx_ring_num);
1238                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1239                         continue;
1240                 case 2:
1241                         mem_share = (mem_size / config->rx_ring_num);
1242                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1243                         continue;
1244                 case 3:
1245                         mem_share = (mem_size / config->rx_ring_num);
1246                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1247                         continue;
1248                 case 4:
1249                         mem_share = (mem_size / config->rx_ring_num);
1250                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1251                         continue;
1252                 case 5:
1253                         mem_share = (mem_size / config->rx_ring_num);
1254                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1255                         continue;
1256                 case 6:
1257                         mem_share = (mem_size / config->rx_ring_num);
1258                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1259                         continue;
1260                 case 7:
1261                         mem_share = (mem_size / config->rx_ring_num);
1262                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1263                         continue;
1264                 }
1265         }
1266         writeq(val64, &bar0->rx_queue_cfg);
1267
1268         /*
1269          * Filling Tx round robin registers
1270          * as per the number of FIFOs
1271          */
1272         switch (config->tx_fifo_num) {
1273         case 1:
1274                 val64 = 0x0000000000000000ULL;
1275                 writeq(val64, &bar0->tx_w_round_robin_0);
1276                 writeq(val64, &bar0->tx_w_round_robin_1);
1277                 writeq(val64, &bar0->tx_w_round_robin_2);
1278                 writeq(val64, &bar0->tx_w_round_robin_3);
1279                 writeq(val64, &bar0->tx_w_round_robin_4);
1280                 break;
1281         case 2:
1282                 val64 = 0x0000010000010000ULL;
1283                 writeq(val64, &bar0->tx_w_round_robin_0);
1284                 val64 = 0x0100000100000100ULL;
1285                 writeq(val64, &bar0->tx_w_round_robin_1);
1286                 val64 = 0x0001000001000001ULL;
1287                 writeq(val64, &bar0->tx_w_round_robin_2);
1288                 val64 = 0x0000010000010000ULL;
1289                 writeq(val64, &bar0->tx_w_round_robin_3);
1290                 val64 = 0x0100000000000000ULL;
1291                 writeq(val64, &bar0->tx_w_round_robin_4);
1292                 break;
1293         case 3:
1294                 val64 = 0x0001000102000001ULL;
1295                 writeq(val64, &bar0->tx_w_round_robin_0);
1296                 val64 = 0x0001020000010001ULL;
1297                 writeq(val64, &bar0->tx_w_round_robin_1);
1298                 val64 = 0x0200000100010200ULL;
1299                 writeq(val64, &bar0->tx_w_round_robin_2);
1300                 val64 = 0x0001000102000001ULL;
1301                 writeq(val64, &bar0->tx_w_round_robin_3);
1302                 val64 = 0x0001020000000000ULL;
1303                 writeq(val64, &bar0->tx_w_round_robin_4);
1304                 break;
1305         case 4:
1306                 val64 = 0x0001020300010200ULL;
1307                 writeq(val64, &bar0->tx_w_round_robin_0);
1308                 val64 = 0x0100000102030001ULL;
1309                 writeq(val64, &bar0->tx_w_round_robin_1);
1310                 val64 = 0x0200010000010203ULL;
1311                 writeq(val64, &bar0->tx_w_round_robin_2);
1312                 val64 = 0x0001020001000001ULL;
1313                 writeq(val64, &bar0->tx_w_round_robin_3);
1314                 val64 = 0x0203000100000000ULL;
1315                 writeq(val64, &bar0->tx_w_round_robin_4);
1316                 break;
1317         case 5:
1318                 val64 = 0x0001000203000102ULL;
1319                 writeq(val64, &bar0->tx_w_round_robin_0);
1320                 val64 = 0x0001020001030004ULL;
1321                 writeq(val64, &bar0->tx_w_round_robin_1);
1322                 val64 = 0x0001000203000102ULL;
1323                 writeq(val64, &bar0->tx_w_round_robin_2);
1324                 val64 = 0x0001020001030004ULL;
1325                 writeq(val64, &bar0->tx_w_round_robin_3);
1326                 val64 = 0x0001000000000000ULL;
1327                 writeq(val64, &bar0->tx_w_round_robin_4);
1328                 break;
1329         case 6:
1330                 val64 = 0x0001020304000102ULL;
1331                 writeq(val64, &bar0->tx_w_round_robin_0);
1332                 val64 = 0x0304050001020001ULL;
1333                 writeq(val64, &bar0->tx_w_round_robin_1);
1334                 val64 = 0x0203000100000102ULL;
1335                 writeq(val64, &bar0->tx_w_round_robin_2);
1336                 val64 = 0x0304000102030405ULL;
1337                 writeq(val64, &bar0->tx_w_round_robin_3);
1338                 val64 = 0x0001000200000000ULL;
1339                 writeq(val64, &bar0->tx_w_round_robin_4);
1340                 break;
1341         case 7:
1342                 val64 = 0x0001020001020300ULL;
1343                 writeq(val64, &bar0->tx_w_round_robin_0);
1344                 val64 = 0x0102030400010203ULL;
1345                 writeq(val64, &bar0->tx_w_round_robin_1);
1346                 val64 = 0x0405060001020001ULL;
1347                 writeq(val64, &bar0->tx_w_round_robin_2);
1348                 val64 = 0x0304050000010200ULL;
1349                 writeq(val64, &bar0->tx_w_round_robin_3);
1350                 val64 = 0x0102030000000000ULL;
1351                 writeq(val64, &bar0->tx_w_round_robin_4);
1352                 break;
1353         case 8:
1354                 val64 = 0x0001020300040105ULL;
1355                 writeq(val64, &bar0->tx_w_round_robin_0);
1356                 val64 = 0x0200030106000204ULL;
1357                 writeq(val64, &bar0->tx_w_round_robin_1);
1358                 val64 = 0x0103000502010007ULL;
1359                 writeq(val64, &bar0->tx_w_round_robin_2);
1360                 val64 = 0x0304010002060500ULL;
1361                 writeq(val64, &bar0->tx_w_round_robin_3);
1362                 val64 = 0x0103020400000000ULL;
1363                 writeq(val64, &bar0->tx_w_round_robin_4);
1364                 break;
1365         }
1366
1367         /* Enable all configured Tx FIFO partitions */
1368         val64 = readq(&bar0->tx_fifo_partition_0);
1369         val64 |= (TX_FIFO_PARTITION_EN);
1370         writeq(val64, &bar0->tx_fifo_partition_0);
1371
1372         /* Filling the Rx round robin registers as per the
1373          * number of Rings and steering based on QoS.
1374          */
1375         switch (config->rx_ring_num) {
1376         case 1:
1377                 val64 = 0x8080808080808080ULL;
1378                 writeq(val64, &bar0->rts_qos_steering);
1379                 break;
1380         case 2:
1381                 val64 = 0x0000010000010000ULL;
1382                 writeq(val64, &bar0->rx_w_round_robin_0);
1383                 val64 = 0x0100000100000100ULL;
1384                 writeq(val64, &bar0->rx_w_round_robin_1);
1385                 val64 = 0x0001000001000001ULL;
1386                 writeq(val64, &bar0->rx_w_round_robin_2);
1387                 val64 = 0x0000010000010000ULL;
1388                 writeq(val64, &bar0->rx_w_round_robin_3);
1389                 val64 = 0x0100000000000000ULL;
1390                 writeq(val64, &bar0->rx_w_round_robin_4);
1391
1392                 val64 = 0x8080808040404040ULL;
1393                 writeq(val64, &bar0->rts_qos_steering);
1394                 break;
1395         case 3:
1396                 val64 = 0x0001000102000001ULL;
1397                 writeq(val64, &bar0->rx_w_round_robin_0);
1398                 val64 = 0x0001020000010001ULL;
1399                 writeq(val64, &bar0->rx_w_round_robin_1);
1400                 val64 = 0x0200000100010200ULL;
1401                 writeq(val64, &bar0->rx_w_round_robin_2);
1402                 val64 = 0x0001000102000001ULL;
1403                 writeq(val64, &bar0->rx_w_round_robin_3);
1404                 val64 = 0x0001020000000000ULL;
1405                 writeq(val64, &bar0->rx_w_round_robin_4);
1406
1407                 val64 = 0x8080804040402020ULL;
1408                 writeq(val64, &bar0->rts_qos_steering);
1409                 break;
1410         case 4:
1411                 val64 = 0x0001020300010200ULL;
1412                 writeq(val64, &bar0->rx_w_round_robin_0);
1413                 val64 = 0x0100000102030001ULL;
1414                 writeq(val64, &bar0->rx_w_round_robin_1);
1415                 val64 = 0x0200010000010203ULL;
1416                 writeq(val64, &bar0->rx_w_round_robin_2);
1417                 val64 = 0x0001020001000001ULL;
1418                 writeq(val64, &bar0->rx_w_round_robin_3);
1419                 val64 = 0x0203000100000000ULL;
1420                 writeq(val64, &bar0->rx_w_round_robin_4);
1421
1422                 val64 = 0x8080404020201010ULL;
1423                 writeq(val64, &bar0->rts_qos_steering);
1424                 break;
1425         case 5:
1426                 val64 = 0x0001000203000102ULL;
1427                 writeq(val64, &bar0->rx_w_round_robin_0);
1428                 val64 = 0x0001020001030004ULL;
1429                 writeq(val64, &bar0->rx_w_round_robin_1);
1430                 val64 = 0x0001000203000102ULL;
1431                 writeq(val64, &bar0->rx_w_round_robin_2);
1432                 val64 = 0x0001020001030004ULL;
1433                 writeq(val64, &bar0->rx_w_round_robin_3);
1434                 val64 = 0x0001000000000000ULL;
1435                 writeq(val64, &bar0->rx_w_round_robin_4);
1436
1437                 val64 = 0x8080404020201008ULL;
1438                 writeq(val64, &bar0->rts_qos_steering);
1439                 break;
1440         case 6:
1441                 val64 = 0x0001020304000102ULL;
1442                 writeq(val64, &bar0->rx_w_round_robin_0);
1443                 val64 = 0x0304050001020001ULL;
1444                 writeq(val64, &bar0->rx_w_round_robin_1);
1445                 val64 = 0x0203000100000102ULL;
1446                 writeq(val64, &bar0->rx_w_round_robin_2);
1447                 val64 = 0x0304000102030405ULL;
1448                 writeq(val64, &bar0->rx_w_round_robin_3);
1449                 val64 = 0x0001000200000000ULL;
1450                 writeq(val64, &bar0->rx_w_round_robin_4);
1451
1452                 val64 = 0x8080404020100804ULL;
1453                 writeq(val64, &bar0->rts_qos_steering);
1454                 break;
1455         case 7:
1456                 val64 = 0x0001020001020300ULL;
1457                 writeq(val64, &bar0->rx_w_round_robin_0);
1458                 val64 = 0x0102030400010203ULL;
1459                 writeq(val64, &bar0->rx_w_round_robin_1);
1460                 val64 = 0x0405060001020001ULL;
1461                 writeq(val64, &bar0->rx_w_round_robin_2);
1462                 val64 = 0x0304050000010200ULL;
1463                 writeq(val64, &bar0->rx_w_round_robin_3);
1464                 val64 = 0x0102030000000000ULL;
1465                 writeq(val64, &bar0->rx_w_round_robin_4);
1466
1467                 val64 = 0x8080402010080402ULL;
1468                 writeq(val64, &bar0->rts_qos_steering);
1469                 break;
1470         case 8:
1471                 val64 = 0x0001020300040105ULL;
1472                 writeq(val64, &bar0->rx_w_round_robin_0);
1473                 val64 = 0x0200030106000204ULL;
1474                 writeq(val64, &bar0->rx_w_round_robin_1);
1475                 val64 = 0x0103000502010007ULL;
1476                 writeq(val64, &bar0->rx_w_round_robin_2);
1477                 val64 = 0x0304010002060500ULL;
1478                 writeq(val64, &bar0->rx_w_round_robin_3);
1479                 val64 = 0x0103020400000000ULL;
1480                 writeq(val64, &bar0->rx_w_round_robin_4);
1481
1482                 val64 = 0x8040201008040201ULL;
1483                 writeq(val64, &bar0->rts_qos_steering);
1484                 break;
1485         }
1486
1487         /* UDP Fix */
1488         val64 = 0;
1489         for (i = 0; i < 8; i++)
1490                 writeq(val64, &bar0->rts_frm_len_n[i]);
1491
1492         /* Set the default rts frame length for the rings configured */
1493         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1494         for (i = 0 ; i < config->rx_ring_num ; i++)
1495                 writeq(val64, &bar0->rts_frm_len_n[i]);
1496
1497         /* Set the frame length for the configured rings
1498          * desired by the user
1499          */
1500         for (i = 0; i < config->rx_ring_num; i++) {
1501                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1502                  * specified frame length steering.
1503                  * If the user provides the frame length then program
1504                  * the rts_frm_len register for those values or else
1505                  * leave it as it is.
1506                  */
1507                 if (rts_frm_len[i] != 0) {
1508                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1509                                 &bar0->rts_frm_len_n[i]);
1510                 }
1511         }
1512
1513         /* Disable differentiated services steering logic */
1514         for (i = 0; i < 64; i++) {
1515                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1516                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1517                                 dev->name);
1518                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1519                         return -ENODEV;
1520                 }
1521         }
1522
1523         /* Program statistics memory */
1524         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1525
1526         if (nic->device_type == XFRAME_II_DEVICE) {
1527                 val64 = STAT_BC(0x320);
1528                 writeq(val64, &bar0->stat_byte_cnt);
1529         }
1530
1531         /*
1532          * Initializing the sampling rate for the device to calculate the
1533          * bandwidth utilization.
1534          */
1535         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1536             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1537         writeq(val64, &bar0->mac_link_util);
1538
1539
1540         /*
1541          * Initializing the Transmit and Receive Traffic Interrupt
1542          * Scheme.
1543          */
1544         /*
1545          * TTI Initialization. Default Tx timer gets us about
1546          * 250 interrupts per sec. Continuous interrupts are enabled
1547          * by default.
1548          */
1549         if (nic->device_type == XFRAME_II_DEVICE) {
1550                 int count = (nic->config.bus_speed * 125)/2;
1551                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1552         } else {
1553
1554                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1555         }
1556         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1557             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1558             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1559                 if (use_continuous_tx_intrs)
1560                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1561         writeq(val64, &bar0->tti_data1_mem);
1562
1563         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1564             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1565             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1566         writeq(val64, &bar0->tti_data2_mem);
1567
1568         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1569         writeq(val64, &bar0->tti_command_mem);
1570
1571         /*
1572          * Once the operation completes, the Strobe bit of the command
1573          * register will be reset. We poll for this particular condition
1574          * We wait for a maximum of 500ms for the operation to complete,
1575          * if it's not complete by then we return error.
1576          */
1577         time = 0;
1578         while (TRUE) {
1579                 val64 = readq(&bar0->tti_command_mem);
1580                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1581                         break;
1582                 }
1583                 if (time > 10) {
1584                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1585                                   dev->name);
1586                         return -ENODEV;
1587                 }
1588                 msleep(50);
1589                 time++;
1590         }
1591
1592         /* RTI Initialization */
1593         if (nic->device_type == XFRAME_II_DEVICE) {
1594                 /*
1595                  * Programmed to generate Apprx 500 Intrs per
1596                  * second
1597                  */
1598                 int count = (nic->config.bus_speed * 125)/4;
1599                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1600         } else
1601                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1602         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1603                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1604                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1605
1606         writeq(val64, &bar0->rti_data1_mem);
1607
1608         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1609                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1610         if (nic->config.intr_type == MSI_X)
1611             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1612                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1613         else
1614             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1615                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1616         writeq(val64, &bar0->rti_data2_mem);
1617
1618         for (i = 0; i < config->rx_ring_num; i++) {
1619                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1620                                 | RTI_CMD_MEM_OFFSET(i);
1621                 writeq(val64, &bar0->rti_command_mem);
1622
1623                 /*
1624                  * Once the operation completes, the Strobe bit of the
1625                  * command register will be reset. We poll for this
1626                  * particular condition. We wait for a maximum of 500ms
1627                  * for the operation to complete, if it's not complete
1628                  * by then we return error.
1629                  */
1630                 time = 0;
1631                 while (TRUE) {
1632                         val64 = readq(&bar0->rti_command_mem);
1633                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1634                                 break;
1635
1636                         if (time > 10) {
1637                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1638                                           dev->name);
1639                                 return -ENODEV;
1640                         }
1641                         time++;
1642                         msleep(50);
1643                 }
1644         }
1645
1646         /*
1647          * Initializing proper values as Pause threshold into all
1648          * the 8 Queues on Rx side.
1649          */
1650         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1651         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1652
1653         /* Disable RMAC PAD STRIPPING */
1654         add = &bar0->mac_cfg;
1655         val64 = readq(&bar0->mac_cfg);
1656         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1657         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1658         writel((u32) (val64), add);
1659         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1660         writel((u32) (val64 >> 32), (add + 4));
1661         val64 = readq(&bar0->mac_cfg);
1662
1663         /* Enable FCS stripping by adapter */
1664         add = &bar0->mac_cfg;
1665         val64 = readq(&bar0->mac_cfg);
1666         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1667         if (nic->device_type == XFRAME_II_DEVICE)
1668                 writeq(val64, &bar0->mac_cfg);
1669         else {
1670                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1671                 writel((u32) (val64), add);
1672                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1673                 writel((u32) (val64 >> 32), (add + 4));
1674         }
1675
1676         /*
1677          * Set the time value to be inserted in the pause frame
1678          * generated by xena.
1679          */
1680         val64 = readq(&bar0->rmac_pause_cfg);
1681         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1682         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1683         writeq(val64, &bar0->rmac_pause_cfg);
1684
1685         /*
1686          * Set the Threshold Limit for Generating the pause frame
1687          * If the amount of data in any Queue exceeds ratio of
1688          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1689          * pause frame is generated
1690          */
1691         val64 = 0;
1692         for (i = 0; i < 4; i++) {
1693                 val64 |=
1694                     (((u64) 0xFF00 | nic->mac_control.
1695                       mc_pause_threshold_q0q3)
1696                      << (i * 2 * 8));
1697         }
1698         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1699
1700         val64 = 0;
1701         for (i = 0; i < 4; i++) {
1702                 val64 |=
1703                     (((u64) 0xFF00 | nic->mac_control.
1704                       mc_pause_threshold_q4q7)
1705                      << (i * 2 * 8));
1706         }
1707         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1708
1709         /*
1710          * TxDMA will stop Read request if the number of read split has
1711          * exceeded the limit pointed by shared_splits
1712          */
1713         val64 = readq(&bar0->pic_control);
1714         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1715         writeq(val64, &bar0->pic_control);
1716
1717         if (nic->config.bus_speed == 266) {
1718                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1719                 writeq(0x0, &bar0->read_retry_delay);
1720                 writeq(0x0, &bar0->write_retry_delay);
1721         }
1722
1723         /*
1724          * Programming the Herc to split every write transaction
1725          * that does not start on an ADB to reduce disconnects.
1726          */
1727         if (nic->device_type == XFRAME_II_DEVICE) {
1728                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1729                         MISC_LINK_STABILITY_PRD(3);
1730                 writeq(val64, &bar0->misc_control);
1731                 val64 = readq(&bar0->pic_control2);
1732                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1733                 writeq(val64, &bar0->pic_control2);
1734         }
1735         if (strstr(nic->product_name, "CX4")) {
1736                 val64 = TMAC_AVG_IPG(0x17);
1737                 writeq(val64, &bar0->tmac_avg_ipg);
1738         }
1739
1740         return SUCCESS;
1741 }
1742 #define LINK_UP_DOWN_INTERRUPT          1
1743 #define MAC_RMAC_ERR_TIMER              2
1744
1745 static int s2io_link_fault_indication(struct s2io_nic *nic)
1746 {
1747         if (nic->config.intr_type != INTA)
1748                 return MAC_RMAC_ERR_TIMER;
1749         if (nic->device_type == XFRAME_II_DEVICE)
1750                 return LINK_UP_DOWN_INTERRUPT;
1751         else
1752                 return MAC_RMAC_ERR_TIMER;
1753 }
1754
1755 /**
1756  *  do_s2io_write_bits -  update alarm bits in alarm register
1757  *  @value: alarm bits
1758  *  @flag: interrupt status
1759  *  @addr: address value
1760  *  Description: update alarm bits in alarm register
1761  *  Return Value:
1762  *  NONE.
1763  */
1764 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1765 {
1766         u64 temp64;
1767
1768         temp64 = readq(addr);
1769
1770         if(flag == ENABLE_INTRS)
1771                 temp64 &= ~((u64) value);
1772         else
1773                 temp64 |= ((u64) value);
1774         writeq(temp64, addr);
1775 }
1776
/*
 * en_dis_err_alarms - enable or disable per-block alarm interrupt sources
 * @nic: device private structure
 * @mask: bitmap selecting which alarm blocks to modify (TX_DMA_INTR,
 *        TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR,
 *        RX_XGXS_INTR, MC_INTR)
 * @flag: ENABLE_INTRS or DISABLE_INTRS, passed straight through to
 *        do_s2io_write_bits() (clear mask bits to enable, set to disable)
 *
 * For every block selected in @mask this updates that block's error-mask
 * registers and accumulates the block's top-level bit into
 * nic->general_int_mask for later use by en_dis_able_nic_intrs().
 * NOTE: the trailing assignment currently zeroes the accumulated mask
 * (see the comment at the bottom), so alarm interrupts stay masked at
 * the general level for now.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 gen_int_mask = 0;

        /* Tx DMA block: TDA, PFC, PCC, TTI, LSO, TPA and SM alarms */
        if (mask & TX_DMA_INTR) {

                gen_int_mask |= TXDMA_INT_M;

                do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
                                TXDMA_PCC_INT | TXDMA_TTI_INT |
                                TXDMA_LSO_INT | TXDMA_TPA_INT |
                                TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

                do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
                                PFC_MISC_0_ERR | PFC_MISC_1_ERR |
                                PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
                                &bar0->pfc_err_mask);

                do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
                                TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
                                TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

                do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
                                PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
                                PCC_N_SERR | PCC_6_COF_OV_ERR |
                                PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
                                PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
                                PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

                do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
                                TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

                do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
                                LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
                                LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
                                flag, &bar0->lso_err_mask);

                do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
                                flag, &bar0->tpa_err_mask);

                do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

        }

        /* Tx MAC block */
        if (mask & TX_MAC_INTR) {
                gen_int_mask |= TXMAC_INT_M;
                do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
                                &bar0->mac_int_mask);
                do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
                                TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
                                TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
                                flag, &bar0->mac_tmac_err_mask);
        }

        /* Tx XGXS block */
        if (mask & TX_XGXS_INTR) {
                gen_int_mask |= TXXGXS_INT_M;
                do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
                                &bar0->xgxs_int_mask);
                do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
                                TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
                                flag, &bar0->xgxs_txgxs_err_mask);
        }

        /* Rx DMA block: RC, PRC, RPA, RDA and RTI alarms */
        if (mask & RX_DMA_INTR) {
                gen_int_mask |= RXDMA_INT_M;
                do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
                                RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
                                flag, &bar0->rxdma_int_mask);
                do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
                                RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
                                RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
                                RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
                do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
                                PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
                                PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
                                &bar0->prc_pcix_err_mask);
                do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
                                RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
                                &bar0->rpa_err_mask);
                do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
                                RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
                                RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
                                RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
                                flag, &bar0->rda_err_mask);
                do_s2io_write_bits(RTI_SM_ERR_ALARM |
                                RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
                                flag, &bar0->rti_err_mask);
        }

        /* Rx MAC block */
        if (mask & RX_MAC_INTR) {
                gen_int_mask |= RXMAC_INT_M;
                do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
                                &bar0->mac_int_mask);
                do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
                                RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
                                RMAC_DOUBLE_ECC_ERR |
                                RMAC_LINK_STATE_CHANGE_INT,
                                flag, &bar0->mac_rmac_err_mask);
        }

        /* Rx XGXS block */
        if (mask & RX_XGXS_INTR)
        {
                gen_int_mask |= RXXGXS_INT_M;
                do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
                                &bar0->xgxs_int_mask);
                do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
                                &bar0->xgxs_rxgxs_err_mask);
        }

        /* Memory controller block */
        if (mask & MC_INTR) {
                gen_int_mask |= MC_INT_M;
                do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
                do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
                                MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
                                &bar0->mc_err_mask);
        }
        nic->general_int_mask = gen_int_mask;

        /* Remove this line when alarm interrupts are enabled */
        nic->general_int_mask = 0;
}
1899 /**
1900  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1901  *  @nic: device private variable,
1902  *  @mask: A mask indicating which Intr block must be modified and,
1903  *  @flag: A flag indicating whether to enable or disable the Intrs.
1904  *  Description: This function will either disable or enable the interrupts
1905  *  depending on the flag argument. The mask argument can be used to
1906  *  enable/disable any Intr block.
1907  *  Return Value: NONE.
1908  */
1909
1910 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1911 {
1912         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1913         register u64 temp64 = 0, intr_mask = 0;
1914
1915         intr_mask = nic->general_int_mask;
1916
1917         /*  Top level interrupt classification */
1918         /*  PIC Interrupts */
1919         if (mask & TX_PIC_INTR) {
1920                 /*  Enable PIC Intrs in the general intr mask register */
1921                 intr_mask |= TXPIC_INT_M;
1922                 if (flag == ENABLE_INTRS) {
1923                         /*
1924                          * If Hercules adapter enable GPIO otherwise
1925                          * disable all PCIX, Flash, MDIO, IIC and GPIO
1926                          * interrupts for now.
1927                          * TODO
1928                          */
1929                         if (s2io_link_fault_indication(nic) ==
1930                                         LINK_UP_DOWN_INTERRUPT ) {
1931                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
1932                                                 &bar0->pic_int_mask);
1933                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1934                                                 &bar0->gpio_int_mask);
1935                         } else
1936                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1937                 } else if (flag == DISABLE_INTRS) {
1938                         /*
1939                          * Disable PIC Intrs in the general
1940                          * intr mask register
1941                          */
1942                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1943                 }
1944         }
1945
1946         /*  Tx traffic interrupts */
1947         if (mask & TX_TRAFFIC_INTR) {
1948                 intr_mask |= TXTRAFFIC_INT_M;
1949                 if (flag == ENABLE_INTRS) {
1950                         /*
1951                          * Enable all the Tx side interrupts
1952                          * writing 0 Enables all 64 TX interrupt levels
1953                          */
1954                         writeq(0x0, &bar0->tx_traffic_mask);
1955                 } else if (flag == DISABLE_INTRS) {
1956                         /*
1957                          * Disable Tx Traffic Intrs in the general intr mask
1958                          * register.
1959                          */
1960                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1961                 }
1962         }
1963
1964         /*  Rx traffic interrupts */
1965         if (mask & RX_TRAFFIC_INTR) {
1966                 intr_mask |= RXTRAFFIC_INT_M;
1967                 if (flag == ENABLE_INTRS) {
1968                         /* writing 0 Enables all 8 RX interrupt levels */
1969                         writeq(0x0, &bar0->rx_traffic_mask);
1970                 } else if (flag == DISABLE_INTRS) {
1971                         /*
1972                          * Disable Rx Traffic Intrs in the general intr mask
1973                          * register.
1974                          */
1975                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1976                 }
1977         }
1978
1979         temp64 = readq(&bar0->general_int_mask);
1980         if (flag == ENABLE_INTRS)
1981                 temp64 &= ~((u64) intr_mask);
1982         else
1983                 temp64 = DISABLE_ALL_INTRS;
1984         writeq(temp64, &bar0->general_int_mask);
1985
1986         nic->general_int_mask = readq(&bar0->general_int_mask);
1987 }
1988
1989 /**
1990  *  verify_pcc_quiescent- Checks for PCC quiescent state
1991  *  Return: 1 If PCC is quiescence
1992  *          0 If PCC is not quiescence
1993  */
1994 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1995 {
1996         int ret = 0, herc;
1997         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1998         u64 val64 = readq(&bar0->adapter_status);
1999
2000         herc = (sp->device_type == XFRAME_II_DEVICE);
2001
2002         if (flag == FALSE) {
2003                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2004                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2005                                 ret = 1;
2006                 } else {
2007                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2008                                 ret = 1;
2009                 }
2010         } else {
2011                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2012                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2013                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2014                                 ret = 1;
2015                 } else {
2016                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2017                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2018                                 ret = 1;
2019                 }
2020         }
2021
2022         return ret;
2023 }
2024 /**
2025  *  verify_xena_quiescence - Checks whether the H/W is ready
2026  *  Description: Returns whether the H/W is ready to go or not. Depending
2027  *  on whether adapter enable bit was written or not the comparison
2028  *  differs and the calling function passes the input argument flag to
2029  *  indicate this.
2030  *  Return: 1 If xena is quiescence
2031  *          0 If Xena is not quiescence
2032  */
2033
2034 static int verify_xena_quiescence(struct s2io_nic *sp)
2035 {
2036         int  mode;
2037         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2038         u64 val64 = readq(&bar0->adapter_status);
2039         mode = s2io_verify_pci_mode(sp);
2040
2041         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2042                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2043                 return 0;
2044         }
2045         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2046         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2047                 return 0;
2048         }
2049         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2050                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2051                 return 0;
2052         }
2053         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2054                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2055                 return 0;
2056         }
2057         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2058                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2059                 return 0;
2060         }
2061         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2062                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2063                 return 0;
2064         }
2065         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2066                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2067                 return 0;
2068         }
2069         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2070                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2071                 return 0;
2072         }
2073
2074         /*
2075          * In PCI 33 mode, the P_PLL is not used, and therefore,
2076          * the the P_PLL_LOCK bit in the adapter_status register will
2077          * not be asserted.
2078          */
2079         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2080                 sp->device_type == XFRAME_II_DEVICE && mode !=
2081                 PCI_MODE_PCI_33) {
2082                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2083                 return 0;
2084         }
2085         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2086                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2087                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2088                 return 0;
2089         }
2090         return 1;
2091 }
2092
2093 /**
2094  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2095  * @sp: Pointer to device specifc structure
2096  * Description :
2097  * New procedure to clear mac address reading  problems on Alpha platforms
2098  *
2099  */
2100
2101 static void fix_mac_address(struct s2io_nic * sp)
2102 {
2103         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2104         u64 val64;
2105         int i = 0;
2106
2107         while (fix_mac[i] != END_SIGN) {
2108                 writeq(fix_mac[i++], &bar0->gpio_control);
2109                 udelay(10);
2110                 val64 = readq(&bar0->gpio_control);
2111         }
2112 }
2113
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE (-1) when the adapter is not quiescent.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each PRC at the first Rx block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Clear the old backoff field, then program a fixed 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the vlan_tag_strip=0 module parameter. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * NOTE(review): this CLEARS ADAPTER_ECC_EN even though the original
	 * comment said "Enabling ECC Protection" -- confirm the intended
	 * polarity of this bit against the Xframe register specification.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value at raw offset 0x2700 (SXE-002). */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2232 /**
2233  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2234  */
2235 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2236                                         TxD *txdlp, int get_off)
2237 {
2238         struct s2io_nic *nic = fifo_data->nic;
2239         struct sk_buff *skb;
2240         struct TxD *txds;
2241         u16 j, frg_cnt;
2242
2243         txds = txdlp;
2244         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2245                 pci_unmap_single(nic->pdev, (dma_addr_t)
2246                         txds->Buffer_Pointer, sizeof(u64),
2247                         PCI_DMA_TODEVICE);
2248                 txds++;
2249         }
2250
2251         skb = (struct sk_buff *) ((unsigned long)
2252                         txds->Host_Control);
2253         if (!skb) {
2254                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2255                 return NULL;
2256         }
2257         pci_unmap_single(nic->pdev, (dma_addr_t)
2258                          txds->Buffer_Pointer,
2259                          skb->len - skb->data_len,
2260                          PCI_DMA_TODEVICE);
2261         frg_cnt = skb_shinfo(skb)->nr_frags;
2262         if (frg_cnt) {
2263                 txds++;
2264                 for (j = 0; j < frg_cnt; j++, txds++) {
2265                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2266                         if (!txds->Buffer_Pointer)
2267                                 break;
2268                         pci_unmap_page(nic->pdev, (dma_addr_t)
2269                                         txds->Buffer_Pointer,
2270                                        frag->size, PCI_DMA_TODEVICE);
2271                 }
2272         }
2273         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2274         return(skb);
2275 }
2276
2277 /**
2278  *  free_tx_buffers - Free all queued Tx buffers
2279  *  @nic : device private variable.
2280  *  Description:
2281  *  Free all queued Tx buffers.
2282  *  Return Value: void
2283 */
2284
2285 static void free_tx_buffers(struct s2io_nic *nic)
2286 {
2287         struct net_device *dev = nic->dev;
2288         struct sk_buff *skb;
2289         struct TxD *txdp;
2290         int i, j;
2291         struct mac_info *mac_control;
2292         struct config_param *config;
2293         int cnt = 0;
2294
2295         mac_control = &nic->mac_control;
2296         config = &nic->config;
2297
2298         for (i = 0; i < config->tx_fifo_num; i++) {
2299                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2300                         txdp = (struct TxD *) \
2301                         mac_control->fifos[i].list_info[j].list_virt_addr;
2302                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2303                         if (skb) {
2304                                 nic->mac_control.stats_info->sw_stat.mem_freed
2305                                         += skb->truesize;
2306                                 dev_kfree_skb(skb);
2307                                 cnt++;
2308                         }
2309                 }
2310                 DBG_PRINT(INTR_DBG,
2311                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2312                           dev->name, cnt, i);
2313                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2314                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2315         }
2316 }
2317
2318 /**
2319  *   stop_nic -  To stop the nic
2320  *   @nic ; device private variable.
2321  *   Description:
2322  *   This function does exactly the opposite of what the start_nic()
2323  *   function does. This function is called to stop the device.
2324  *   Return Value:
2325  *   void.
2326  */
2327
2328 static void stop_nic(struct s2io_nic *nic)
2329 {
2330         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2331         register u64 val64 = 0;
2332         u16 interruptible;
2333         struct mac_info *mac_control;
2334         struct config_param *config;
2335
2336         mac_control = &nic->mac_control;
2337         config = &nic->config;
2338
2339         /*  Disable all interrupts */
2340         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2341         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2342         interruptible |= TX_PIC_INTR;
2343         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2344
2345         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2346         val64 = readq(&bar0->adapter_control);
2347         val64 &= ~(ADAPTER_CNTL_EN);
2348         writeq(val64, &bar0->adapter_control);
2349 }
2350
2351 /**
2352  *  fill_rx_buffers - Allocates the Rx side skbs
2353  *  @nic:  device private variable
2354  *  @ring_no: ring number
2355  *  Description:
2356  *  The function allocates Rx side skbs and puts the physical
2357  *  address of these buffers into the RxD buffer pointers, so that the NIC
2358  *  can DMA the received frame into these locations.
2359  *  The NIC supports 3 receive modes, viz
2360  *  1. single buffer,
2361  *  2. three buffer and
2362  *  3. Five buffer modes.
2363  *  Each mode defines how many fragments the received frame will be split
2364  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2365  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. The driver currently fills buffers in the
 *  single buffer (RXD_MODE_1) and two buffer (RXD_MODE_3B) modes.
2368  *   Return Value:
2369  *  SUCCESS on success or an appropriate -ve value on failure.
2370  */
2371
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of descriptors that still need a buffer on this ring. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		/* Put caught up with get and the slot is still occupied:
		 * the ring is full, stop refilling. */
		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of the current block reached: wrap to the next block
		 * (and to block 0 past the last one). */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* Publish the linear put position; in non-NAPI mode the
		 * interrupt path reads it concurrently, hence the lock. */
		if(!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* Descriptor still owned by the adapter (3B mode marks a
		 * filled descriptor with bit 0 of Control_2): stop here. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors to the NIC before
			 * bailing out, so earlier work is not lost. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			nic->mac_control.stats_info->sw_stat. \
				mem_alloc_fail_cnt++;
			return -ENOMEM ;
		}
		nic->mac_control.stats_info->sw_stat.mem_allocated
			+= skb->truesize;
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next alignment boundary;
			 * assumes ALIGN_SIZE is 2^k - 1 so it doubles as the
			 * mask below -- NOTE(review): confirm its definition. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 (headers) is mapped once and then only
			 * synced on reuse. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				rxdp3->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping before
					 * reporting the failure. */
					pci_unmap_single
						(nic->pdev,
						(dma_addr_t)rxdp3->Buffer2_ptr,
						dev->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Descriptors inside a sync batch are handed to the NIC
		 * immediately; batch heads are deferred (see below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Every rxsync_frequency-th descriptor becomes the head of a
		 * new batch; the previous head is released to the NIC after
		 * a write barrier so all its fields are visible first. */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2595
2596 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2597 {
2598         struct net_device *dev = sp->dev;
2599         int j;
2600         struct sk_buff *skb;
2601         struct RxD_t *rxdp;
2602         struct mac_info *mac_control;
2603         struct buffAdd *ba;
2604         struct RxD1 *rxdp1;
2605         struct RxD3 *rxdp3;
2606
2607         mac_control = &sp->mac_control;
2608         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2609                 rxdp = mac_control->rings[ring_no].
2610                                 rx_blocks[blk].rxds[j].virt_addr;
2611                 skb = (struct sk_buff *)
2612                         ((unsigned long) rxdp->Host_Control);
2613                 if (!skb) {
2614                         continue;
2615                 }
2616                 if (sp->rxd_mode == RXD_MODE_1) {
2617                         rxdp1 = (struct RxD1*)rxdp;
2618                         pci_unmap_single(sp->pdev, (dma_addr_t)
2619                                 rxdp1->Buffer0_ptr,
2620                                 dev->mtu +
2621                                 HEADER_ETHERNET_II_802_3_SIZE
2622                                 + HEADER_802_2_SIZE +
2623                                 HEADER_SNAP_SIZE,
2624                                 PCI_DMA_FROMDEVICE);
2625                         memset(rxdp, 0, sizeof(struct RxD1));
2626                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2627                         rxdp3 = (struct RxD3*)rxdp;
2628                         ba = &mac_control->rings[ring_no].
2629                                 ba[blk][j];
2630                         pci_unmap_single(sp->pdev, (dma_addr_t)
2631                                 rxdp3->Buffer0_ptr,
2632                                 BUF0_LEN,
2633                                 PCI_DMA_FROMDEVICE);
2634                         pci_unmap_single(sp->pdev, (dma_addr_t)
2635                                 rxdp3->Buffer1_ptr,
2636                                 BUF1_LEN,
2637                                 PCI_DMA_FROMDEVICE);
2638                         pci_unmap_single(sp->pdev, (dma_addr_t)
2639                                 rxdp3->Buffer2_ptr,
2640                                 dev->mtu + 4,
2641                                 PCI_DMA_FROMDEVICE);
2642                         memset(rxdp, 0, sizeof(struct RxD3));
2643                 }
2644                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2645                 dev_kfree_skb(skb);
2646                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2647         }
2648 }
2649
2650 /**
2651  *  free_rx_buffers - Frees all Rx buffers
2652  *  @sp: device private variable.
2653  *  Description:
2654  *  This function will free all Rx buffers allocated by host.
2655  *  Return Value:
2656  *  NONE.
2657  */
2658
2659 static void free_rx_buffers(struct s2io_nic *sp)
2660 {
2661         struct net_device *dev = sp->dev;
2662         int i, blk = 0, buf_cnt = 0;
2663         struct mac_info *mac_control;
2664         struct config_param *config;
2665
2666         mac_control = &sp->mac_control;
2667         config = &sp->config;
2668
2669         for (i = 0; i < config->rx_ring_num; i++) {
2670                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2671                         free_rxd_blk(sp,i,blk);
2672
2673                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2674                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2675                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2676                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2677                 atomic_set(&sp->rx_bufs_left[i], 0);
2678                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2679                           dev->name, buf_cnt, i);
2680         }
2681 }
2682
2683 /**
2684  * s2io_poll - Rx interrupt handler for NAPI support
2685  * @napi : pointer to the napi structure.
2686  * @budget : The number of packets that were budgeted to be processed
2687  * during  one pass through the 'Poll" function.
2688  * Description:
2689  * Comes into picture only if NAPI support has been incorporated. It does
2690  * the same thing that rx_intr_handler does, but not in a interrupt context
2691  * also It will process only a given number of packets.
2692  * Return value:
2693  * 0 on success and 1 if there are No Rx packets to be processed.
2694  */
2695
2696 static int s2io_poll(struct napi_struct *napi, int budget)
2697 {
2698         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2699         struct net_device *dev = nic->dev;
2700         int pkt_cnt = 0, org_pkts_to_process;
2701         struct mac_info *mac_control;
2702         struct config_param *config;
2703         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2704         int i;
2705
2706         mac_control = &nic->mac_control;
2707         config = &nic->config;
2708
2709         nic->pkts_to_process = budget;
2710         org_pkts_to_process = nic->pkts_to_process;
2711
2712         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2713         readl(&bar0->rx_traffic_int);
2714
2715         for (i = 0; i < config->rx_ring_num; i++) {
2716                 rx_intr_handler(&mac_control->rings[i]);
2717                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2718                 if (!nic->pkts_to_process) {
2719                         /* Quota for the current iteration has been met */
2720                         goto no_rx;
2721                 }
2722         }
2723
2724         netif_rx_complete(dev, napi);
2725
2726         for (i = 0; i < config->rx_ring_num; i++) {
2727                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2728                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2729                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2730                         break;
2731                 }
2732         }
2733         /* Re enable the Rx interrupts. */
2734         writeq(0x0, &bar0->rx_traffic_mask);
2735         readl(&bar0->rx_traffic_mask);
2736         return pkt_cnt;
2737
2738 no_rx:
2739         for (i = 0; i < config->rx_ring_num; i++) {
2740                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2741                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2742                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2743                         break;
2744                 }
2745         }
2746         return pkt_cnt;
2747 }
2748
2749 #ifdef CONFIG_NET_POLL_CONTROLLER
2750 /**
2751  * s2io_netpoll - netpoll event handler entry point
2752  * @dev : pointer to the device structure.
2753  * Description:
2754  *      This function will be called by upper layer to check for events on the
2755  * interface in situations where interrupts are disabled. It is used for
2756  * specific in-kernel networking tasks, such as remote consoles and kernel
2757  * debugging over the network (example netdump in RedHat).
2758  */
2759 static void s2io_netpoll(struct net_device *dev)
2760 {
2761         struct s2io_nic *nic = dev->priv;
2762         struct mac_info *mac_control;
2763         struct config_param *config;
2764         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2765         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2766         int i;
2767
2768         if (pci_channel_offline(nic->pdev))
2769                 return;
2770
2771         disable_irq(dev->irq);
2772
2773         mac_control = &nic->mac_control;
2774         config = &nic->config;
2775
2776         writeq(val64, &bar0->rx_traffic_int);
2777         writeq(val64, &bar0->tx_traffic_int);
2778
2779         /* we need to free up the transmitted skbufs or else netpoll will
2780          * run out of skbs and will fail and eventually netpoll application such
2781          * as netdump will fail.
2782          */
2783         for (i = 0; i < config->tx_fifo_num; i++)
2784                 tx_intr_handler(&mac_control->fifos[i]);
2785
2786         /* check for received packet and indicate up to network */
2787         for (i = 0; i < config->rx_ring_num; i++)
2788                 rx_intr_handler(&mac_control->rings[i]);
2789
2790         for (i = 0; i < config->rx_ring_num; i++) {
2791                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2792                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2793                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2794                         break;
2795                 }
2796         }
2797         enable_irq(dev->irq);
2798         return;
2799 }
2800 #endif
2801
2802 /**
2803  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per-ring private data (struct ring_info) of the ring to service.
2805  *  Description:
2806  *  If the interrupt is because of a received frame or if the
2807  *  receive ring contains fresh as yet un-processed frames,this function is
2808  *  called. It picks out the RxD at which place the last Rx processing had
2809  *  stopped and sends the skb to the OSM's Rx handler and then increments
2810  *  the offset.
2811  *  Return Value:
2812  *  NONE.
2813  */
2814 static void rx_intr_handler(struct ring_info *ring_data)
2815 {
2816         struct s2io_nic *nic = ring_data->nic;
2817         struct net_device *dev = (struct net_device *) nic->dev;
2818         int get_block, put_block, put_offset;
2819         struct rx_curr_get_info get_info, put_info;
2820         struct RxD_t *rxdp;
2821         struct sk_buff *skb;
2822         int pkt_cnt = 0;
2823         int i;
2824         struct RxD1* rxdp1;
2825         struct RxD3* rxdp3;
2826
2827         spin_lock(&nic->rx_lock);
2828
2829         get_info = ring_data->rx_curr_get_info;
2830         get_block = get_info.block_index;
2831         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2832         put_block = put_info.block_index;
2833         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2834         if (!napi) {
2835                 spin_lock(&nic->put_lock);
2836                 put_offset = ring_data->put_pos;
2837                 spin_unlock(&nic->put_lock);
2838         } else
2839                 put_offset = ring_data->put_pos;
2840
2841         while (RXD_IS_UP2DT(rxdp)) {
2842                 /*
2843                  * If your are next to put index then it's
2844                  * FIFO full condition
2845                  */
2846                 if ((get_block == put_block) &&
2847                     (get_info.offset + 1) == put_info.offset) {
2848                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2849                         break;
2850                 }
2851                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2852                 if (skb == NULL) {
2853                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2854                                   dev->name);
2855                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2856                         spin_unlock(&nic->rx_lock);
2857                         return;
2858                 }
2859                 if (nic->rxd_mode == RXD_MODE_1) {
2860                         rxdp1 = (struct RxD1*)rxdp;
2861                         pci_unmap_single(nic->pdev, (dma_addr_t)
2862                                 rxdp1->Buffer0_ptr,
2863                                 dev->mtu +
2864                                 HEADER_ETHERNET_II_802_3_SIZE +
2865                                 HEADER_802_2_SIZE +
2866                                 HEADER_SNAP_SIZE,
2867                                 PCI_DMA_FROMDEVICE);
2868                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2869                         rxdp3 = (struct RxD3*)rxdp;
2870                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2871                                 rxdp3->Buffer0_ptr,
2872                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
2873                         pci_unmap_single(nic->pdev, (dma_addr_t)
2874                                 rxdp3->Buffer2_ptr,
2875                                 dev->mtu + 4,
2876                                 PCI_DMA_FROMDEVICE);
2877                 }
2878                 prefetch(skb->data);
2879                 rx_osm_handler(ring_data, rxdp);
2880                 get_info.offset++;
2881                 ring_data->rx_curr_get_info.offset = get_info.offset;
2882                 rxdp = ring_data->rx_blocks[get_block].
2883                                 rxds[get_info.offset].virt_addr;
2884                 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2885                         get_info.offset = 0;
2886                         ring_data->rx_curr_get_info.offset = get_info.offset;
2887                         get_block++;
2888                         if (get_block == ring_data->block_count)
2889                                 get_block = 0;
2890                         ring_data->rx_curr_get_info.block_index = get_block;
2891                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2892                 }
2893
2894                 nic->pkts_to_process -= 1;
2895                 if ((napi) && (!nic->pkts_to_process))
2896                         break;
2897                 pkt_cnt++;
2898                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2899                         break;
2900         }
2901         if (nic->lro) {
2902                 /* Clear all LRO sessions before exiting */
2903                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2904                         struct lro *lro = &nic->lro0_n[i];
2905                         if (lro->in_use) {
2906                                 update_L3L4_header(nic, lro);
2907                                 queue_rx_frame(lro->parent);
2908                                 clear_lro_session(lro);
2909                         }
2910                 }
2911         }
2912
2913         spin_unlock(&nic->rx_lock);
2914 }
2915
2916 /**
2917  *  tx_intr_handler - Transmit interrupt handler
2918  *  @nic : device private variable
2919  *  Description:
2920  *  If an interrupt was raised to indicate DMA complete of the
2921  *  Tx packet, this function is called. It identifies the last TxD
2922  *  whose buffer was freed and frees all skbs whose data have already
2923  *  DMA'ed into the NICs internal memory.
2924  *  Return Value:
2925  *  NONE
2926  */
2927
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb;
	struct TxD *txdlp;
	u8 err_mask;

	/* Snapshot the get/put pointers; only the offsets are compared. */
	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/*
	 * Reclaim descriptors until we hit one still owned by the NIC,
	 * catch up with the put pointer, or find a slot with no skb.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			/* Bit 0 of the transfer code flags a parity error. */
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			/* A completed descriptor without an skb means the
			 * list is inconsistent; bail out of the reclaim. */
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the get pointer, wrapping at the end of the list. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Descriptors were freed, so the queue may accept frames again. */
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}
3010
3011 /**
3012  *  s2io_mdio_write - Function to write in to MDIO registers
3013  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3014  *  @addr     : address value
3015  *  @value    : data value
3016  *  @dev      : pointer to net_device structure
3017  *  Description:
3018  *  This function is used to write values to the MDIO registers
3019  *  NONE
3020  */
3021 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3022 {
3023         u64 val64 = 0x0;
3024         struct s2io_nic *sp = dev->priv;
3025         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3026
3027         //address transaction
3028         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3029                         | MDIO_MMD_DEV_ADDR(mmd_type)
3030                         | MDIO_MMS_PRT_ADDR(0x0);
3031         writeq(val64, &bar0->mdio_control);
3032         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3033         writeq(val64, &bar0->mdio_control);
3034         udelay(100);
3035
3036         //Data transaction
3037         val64 = 0x0;
3038         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3039                         | MDIO_MMD_DEV_ADDR(mmd_type)
3040                         | MDIO_MMS_PRT_ADDR(0x0)
3041                         | MDIO_MDIO_DATA(value)
3042                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3043         writeq(val64, &bar0->mdio_control);
3044         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3045         writeq(val64, &bar0->mdio_control);
3046         udelay(100);
3047
3048         val64 = 0x0;
3049         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3050         | MDIO_MMD_DEV_ADDR(mmd_type)
3051         | MDIO_MMS_PRT_ADDR(0x0)
3052         | MDIO_OP(MDIO_OP_READ_TRANS);
3053         writeq(val64, &bar0->mdio_control);
3054         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3055         writeq(val64, &bar0->mdio_control);
3056         udelay(100);
3057
3058 }
3059
3060 /**
3061  *  s2io_mdio_read - Function to write in to MDIO registers
3062  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3063  *  @addr     : address value
3064  *  @dev      : pointer to net_device structure
3065  *  Description:
3066  *  This function is used to read values to the MDIO registers
3067  *  NONE
3068  */
3069 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3070 {
3071         u64 val64 = 0x0;
3072         u64 rval64 = 0x0;
3073         struct s2io_nic *sp = dev->priv;
3074         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3075
3076         /* address transaction */
3077         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3078                         | MDIO_MMD_DEV_ADDR(mmd_type)
3079                         | MDIO_MMS_PRT_ADDR(0x0);
3080         writeq(val64, &bar0->mdio_control);
3081         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3082         writeq(val64, &bar0->mdio_control);
3083         udelay(100);
3084
3085         /* Data transaction */
3086         val64 = 0x0;
3087         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3088                         | MDIO_MMD_DEV_ADDR(mmd_type)
3089                         | MDIO_MMS_PRT_ADDR(0x0)
3090                         | MDIO_OP(MDIO_OP_READ_TRANS);
3091         writeq(val64, &bar0->mdio_control);
3092         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3093         writeq(val64, &bar0->mdio_control);
3094         udelay(100);
3095
3096         /* Read the value from regs */
3097         rval64 = readq(&bar0->mdio_control);
3098         rval64 = rval64 & 0xFFFF0000;
3099         rval64 = rval64 >> 16;
3100         return rval64;
3101 }
3102 /**
3103  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3104  *  @counter      : couter value to be updated
3105  *  @flag         : flag to indicate the status
3106  *  @type         : counter type
3107  *  Description:
3108  *  This function is to check the status of the xpak counters value
3109  *  NONE
3110  */
3111
3112 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3113 {
3114         u64 mask = 0x3;
3115         u64 val64;
3116         int i;
3117         for(i = 0; i <index; i++)
3118                 mask = mask << 0x2;
3119
3120         if(flag > 0)
3121         {
3122                 *counter = *counter + 1;
3123                 val64 = *regs_stat & mask;
3124                 val64 = val64 >> (index * 0x2);
3125                 val64 = val64 + 1;
3126                 if(val64 == 3)
3127                 {
3128                         switch(type)
3129                         {
3130                         case 1:
3131                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3132                                           "service. Excessive temperatures may "
3133                                           "result in premature transceiver "
3134                                           "failure \n");
3135                         break;
3136                         case 2:
3137                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3138                                           "service Excessive bias currents may "
3139                                           "indicate imminent laser diode "
3140                                           "failure \n");
3141                         break;
3142                         case 3:
3143                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3144                                           "service Excessive laser output "
3145                                           "power may saturate far-end "
3146                                           "receiver\n");
3147                         break;
3148                         default:
3149                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3150                                           "type \n");
3151                         }
3152                         val64 = 0x0;
3153                 }
3154                 val64 = val64 << (index * 0x2);
3155                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3156
3157         } else {
3158                 *regs_stat = *regs_stat & (~mask);
3159         }
3160 }
3161
3162 /**
3163  *  s2io_updt_xpak_counter - Function to update the xpak counters
3164  *  @dev         : pointer to net_device struct
3165  *  Description:
3166  *  This function is to upate the status of the xpak counters value
3167  *  NONE
3168  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	/* All-ones or all-zeros means the slave did not respond. */
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bits 7/6: transceiver temperature high/low alarms. */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bits 3/2: laser bias current high/low alarms. */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bits 1/0: laser output power high/low alarms. */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Warning bits mirror the alarm bit layout above. */
	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3260
3261 /**
3262  *  wait_for_cmd_complete - waits for a command to complete.
3263  *  @sp : private member of the device structure, which is a pointer to the
3264  *  s2io_nic structure.
3265  *  Description: Function that waits for a command to Write into RMAC
3266  *  ADDR DATA registers to be completed and returns either success or
3267  *  error depending on whether the command was complete or not.
3268  *  Return value:
3269  *   SUCCESS on success and FAILURE on failure.
3270  */
3271
3272 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3273                                 int bit_state)
3274 {
3275         int ret = FAILURE, cnt = 0, delay = 1;
3276         u64 val64;
3277
3278         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3279                 return FAILURE;
3280
3281         do {
3282                 val64 = readq(addr);
3283                 if (bit_state == S2IO_BIT_RESET) {
3284                         if (!(val64 & busy_bit)) {
3285                                 ret = SUCCESS;
3286                                 break;
3287                         }
3288                 } else {
3289                         if (!(val64 & busy_bit)) {
3290                                 ret = SUCCESS;
3291                                 break;
3292                         }
3293                 }
3294
3295                 if(in_interrupt())
3296                         mdelay(delay);
3297                 else
3298                         msleep(delay);
3299
3300                 if (++cnt >= 10)
3301                         delay = 50;
3302         } while (cnt < 20);
3303         return ret;
3304 }
3305 /*
3306  * check_pci_device_id - Checks if the device id is supported
3307  * @id : device id
3308  * Description: Function to check if the pci device id is supported by driver.
3309  * Return value: Actual device id if supported else PCI_ANY_ID
3310  */
3311 static u16 check_pci_device_id(u16 id)
3312 {
3313         switch (id) {
3314         case PCI_DEVICE_ID_HERC_WIN:
3315         case PCI_DEVICE_ID_HERC_UNI:
3316                 return XFRAME_II_DEVICE;
3317         case PCI_DEVICE_ID_S2IO_UNI:
3318         case PCI_DEVICE_ID_S2IO_WIN:
3319                 return XFRAME_I_DEVICE;
3320         default:
3321                 return PCI_ANY_ID;
3322         }
3323 }
3324
3325 /**
3326  *  s2io_reset - Resets the card.
3327  *  @sp : private member of the device structure.
3328  *  Description: Function to Reset the card. This function then also
3329  *  restores the previously saved PCI configuration space registers as
3330  *  the card reset also resets the configuration space.
3331  *  Return value:
3332  *  void.
3333  */
3334
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after the software reset. */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		/* Read the device id (config offset 0x2) to verify that
		 * config space has actually come back after the reset. */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	/* Restore the PCI-X command register saved above. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	/* restore the previously assigned mac address */
	do_s2io_prog_unicast(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

	sp->device_enabled_once = FALSE;
}
3441
3442 /**
3443  *  s2io_set_swapper - to set the swapper controle on the card
3444  *  @sp : private member of the device structure,
3445  *  pointer to the s2io_nic structure.
3446  *  Description: Function to set the swapper control on the card
3447  *  correctly depending on the 'endianness' of the system.
3448  *  Return value:
3449  *  SUCCESS on success and FAILURE on failure.
3450  */
3451
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Candidate read-path swapper settings, tried in order
		 * until the feedback register reads back correctly. */
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			/* No setting produced a correct read path. */
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Probe the write path: write a known pattern to xmsi_address
	 * and check it reads back unchanged. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		/* Candidate write-path settings, OR'ed with the read-path
		 * setting (valr) proven good above. */
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep the verified upper control bits; rebuild the rest below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another look. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3579
3580 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3581 {
3582         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3583         u64 val64;
3584         int ret = 0, cnt = 0;
3585
3586         do {
3587                 val64 = readq(&bar0->xmsi_access);
3588                 if (!(val64 & s2BIT(15)))
3589                         break;
3590                 mdelay(1);
3591                 cnt++;
3592         } while(cnt < 5);
3593         if (cnt == 5) {
3594                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3595                 ret = 1;
3596         }
3597
3598         return ret;
3599 }
3600
3601 static void restore_xmsi_data(struct s2io_nic *nic)
3602 {
3603         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3604         u64 val64;
3605         int i;
3606
3607         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3608                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3609                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3610                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
3611                 writeq(val64, &bar0->xmsi_access);
3612                 if (wait_for_msix_trans(nic, i)) {
3613                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3614                         continue;
3615                 }
3616         }
3617 }
3618
3619 static void store_xmsi_data(struct s2io_nic *nic)
3620 {
3621         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3622         u64 val64, addr, data;
3623         int i;
3624
3625         /* Store and display */
3626         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3627                 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3628                 writeq(val64, &bar0->xmsi_access);
3629                 if (wait_for_msix_trans(nic, i)) {
3630                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3631                         continue;
3632                 }
3633                 addr = readq(&bar0->xmsi_address);
3634                 data = readq(&bar0->xmsi_data);
3635                 if (addr && data) {
3636                         nic->msix_info[i].addr = addr;
3637                         nic->msix_info[i].data = data;
3638                 }
3639         }
3640 }
3641
/*
 * s2io_enable_msi_x - allocate and program the device's MSI-X vectors
 * @nic: device private structure
 *
 * Allocates the msix_entry table handed to pci_enable_msix() plus the
 * driver-side s2io_msix_entry bookkeeping table, steers every Tx FIFO
 * and Rx ring to its own vector via the tx_mat/rx_mat registers
 * (vector 0 is skipped: msix_indx starts at 1), and enables MSI-X.
 * On failure both tables are freed, the stats counters updated, and a
 * negative errno returned; returns 0 on success.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 tx_mat, rx_mat;
        u16 msi_control; /* Temp variable */
        int ret, i, j, msix_indx = 1;

        /* Vector table consumed by pci_enable_msix() */
        nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
                               GFP_KERNEL);
        if (!nic->entries) {
                DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
                        __FUNCTION__);
                nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
                return -ENOMEM;
        }
        nic->mac_control.stats_info->sw_stat.mem_allocated
                += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

        /* Per-vector driver bookkeeping: handler arg, type, in_use flag */
        nic->s2io_entries =
                kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
                                   GFP_KERNEL);
        if (!nic->s2io_entries) {
                DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
                        __FUNCTION__);
                nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
                kfree(nic->entries);
                nic->mac_control.stats_info->sw_stat.mem_freed
                        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                return -ENOMEM;
        }
         nic->mac_control.stats_info->sw_stat.mem_allocated
                += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

        for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
                nic->entries[i].entry = i;
                nic->s2io_entries[i].entry = i;
                nic->s2io_entries[i].arg = NULL;
                nic->s2io_entries[i].in_use = 0;
        }

        /* Steer each Tx FIFO interrupt to its own vector, starting at 1 */
        tx_mat = readq(&bar0->tx_mat0_n[0]);
        for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
                tx_mat |= TX_MAT_SET(i, msix_indx);
                nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
                nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
                nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
        }
        writeq(tx_mat, &bar0->tx_mat0_n[0]);

        /* Steer each Rx ring interrupt to the vectors after the FIFOs' */
        rx_mat = readq(&bar0->rx_mat);
        for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
                rx_mat |= RX_MAT_SET(j, msix_indx);
                nic->s2io_entries[msix_indx].arg
                        = &nic->mac_control.rings[j];
                nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
                nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
        }
        writeq(rx_mat, &bar0->rx_mat);

        nic->avail_msix_vectors = 0;
        ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
        /* We fail init if error or we get less vectors than min required */
        if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
                /* A positive return is the number of vectors the platform
                 * can grant; retry with exactly that many */
                nic->avail_msix_vectors = ret;
                ret = pci_enable_msix(nic->pdev, nic->entries, ret);
        }
        if (ret) {
                /* Enable failed: roll back both tables and their stats */
                DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
                kfree(nic->entries);
                nic->mac_control.stats_info->sw_stat.mem_freed
                        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
                kfree(nic->s2io_entries);
                nic->mac_control.stats_info->sw_stat.mem_freed
                += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
                nic->entries = NULL;
                nic->s2io_entries = NULL;
                nic->avail_msix_vectors = 0;
                return -ENOMEM;
        }
        if (!nic->avail_msix_vectors)
                nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

        /*
         * To enable MSI-X, MSI also needs to be enabled, due to a bug
         * in the herc NIC. (Temp change, needs to be removed later)
         */
        pci_read_config_word(nic->pdev, 0x42, &msi_control);
        msi_control |= 0x1; /* Enable MSI */
        pci_write_config_word(nic->pdev, 0x42, msi_control);

        return 0;
}
3734
3735 /* Handle software interrupt used during MSI(X) test */
3736 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3737 {
3738         struct s2io_nic *sp = dev_id;
3739
3740         sp->msi_detected = 1;
3741         wake_up(&sp->msi_wait);
3742
3743         return IRQ_HANDLED;
3744 }
3745
3746 /* Test interrupt path by forcing a a software IRQ */
3747 static int s2io_test_msi(struct s2io_nic *sp)
3748 {
3749         struct pci_dev *pdev = sp->pdev;
3750         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3751         int err;
3752         u64 val64, saved64;
3753
3754         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3755                         sp->name, sp);
3756         if (err) {
3757                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3758                        sp->dev->name, pci_name(pdev), pdev->irq);
3759                 return err;
3760         }
3761
3762         init_waitqueue_head (&sp->msi_wait);
3763         sp->msi_detected = 0;
3764
3765         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3766         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3767         val64 |= SCHED_INT_CTRL_TIMER_EN;
3768         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3769         writeq(val64, &bar0->scheduled_int_ctrl);
3770
3771         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3772
3773         if (!sp->msi_detected) {
3774                 /* MSI(X) test failed, go back to INTx mode */
3775                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated"
3776                         "using MSI(X) during test\n", sp->dev->name,
3777                         pci_name(pdev));
3778
3779                 err = -EOPNOTSUPP;
3780         }
3781
3782         free_irq(sp->entries[1].vector, sp);
3783
3784         writeq(saved64, &bar0->scheduled_int_ctrl);
3785
3786         return err;
3787 }
3788
3789 static void remove_msix_isr(struct s2io_nic *sp)
3790 {
3791         int i;
3792         u16 msi_control;
3793
3794         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3795                 if (sp->s2io_entries[i].in_use ==
3796                         MSIX_REGISTERED_SUCCESS) {
3797                         int vector = sp->entries[i].vector;
3798                         void *arg = sp->s2io_entries[i].arg;
3799                         free_irq(vector, arg);
3800                 }
3801         }
3802
3803         kfree(sp->entries);
3804         kfree(sp->s2io_entries);
3805         sp->entries = NULL;
3806         sp->s2io_entries = NULL;
3807
3808         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3809         msi_control &= 0xFFFE; /* Disable MSI */
3810         pci_write_config_word(sp->pdev, 0x42, msi_control);
3811
3812         pci_disable_msix(sp->pdev);
3813 }
3814
3815 static void remove_inta_isr(struct s2io_nic *sp)
3816 {
3817         struct net_device *dev = sp->dev;
3818
3819         free_irq(sp->pdev->irq, dev);
3820 }
3821
3822 /* ********************************************************* *
3823  * Functions defined below concern the OS part of the driver *
3824  * ********************************************************* */
3825
3826 /**
3827  *  s2io_open - open entry point of the driver
3828  *  @dev : pointer to the device structure.
3829  *  Description:
3830  *  This function is the open entry point of the driver. It mainly calls a
3831  *  function to allocate Rx buffers and inserts them into the buffer
3832  *  descriptors and then enables the Rx part of the NIC.
3833  *  Return value:
3834  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3835  *   file on failure.
3836  */
3837
3838 static int s2io_open(struct net_device *dev)
3839 {
3840         struct s2io_nic *sp = dev->priv;
3841         int err = 0;
3842
3843         /*
3844          * Make sure you have link off by default every time
3845          * Nic is initialized
3846          */
3847         netif_carrier_off(dev);
3848         sp->last_link_state = 0;
3849
3850         if (sp->config.intr_type == MSI_X) {
3851                 int ret = s2io_enable_msi_x(sp);
3852
3853                 if (!ret) {
3854                         ret = s2io_test_msi(sp);
3855                         /* rollback MSI-X, will re-enable during add_isr() */
3856                         remove_msix_isr(sp);
3857                 }
3858                 if (ret) {
3859
3860                         DBG_PRINT(ERR_DBG,
3861                           "%s: MSI-X requested but failed to enable\n",
3862                           dev->name);
3863                         sp->config.intr_type = INTA;
3864                 }
3865         }
3866
3867         /* NAPI doesn't work well with MSI(X) */
3868          if (sp->config.intr_type != INTA) {
3869                 if(sp->config.napi)
3870                         sp->config.napi = 0;
3871         }
3872
3873         /* Initialize H/W and enable interrupts */
3874         err = s2io_card_up(sp);
3875         if (err) {
3876                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3877                           dev->name);
3878                 goto hw_init_failed;
3879         }
3880
3881         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3882                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3883                 s2io_card_down(sp);
3884                 err = -ENODEV;
3885                 goto hw_init_failed;
3886         }
3887
3888         netif_start_queue(dev);
3889         return 0;
3890
3891 hw_init_failed:
3892         if (sp->config.intr_type == MSI_X) {
3893                 if (sp->entries) {
3894                         kfree(sp->entries);
3895                         sp->mac_control.stats_info->sw_stat.mem_freed
3896                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3897                 }
3898                 if (sp->s2io_entries) {
3899                         kfree(sp->s2io_entries);
3900                         sp->mac_control.stats_info->sw_stat.mem_freed
3901                         += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3902                 }
3903         }
3904         return err;
3905 }
3906
3907 /**
3908  *  s2io_close -close entry point of the driver
3909  *  @dev : device pointer.
3910  *  Description:
3911  *  This is the stop entry point of the driver. It needs to undo exactly
3912  *  whatever was done by the open entry point,thus it's usually referred to
3913  *  as the close function.Among other things this function mainly stops the
3914  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3915  *  Return value:
3916  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3917  *  file on failure.
3918  */
3919
3920 static int s2io_close(struct net_device *dev)
3921 {
3922         struct s2io_nic *sp = dev->priv;
3923
3924         /* Return if the device is already closed               *
3925         *  Can happen when s2io_card_up failed in change_mtu    *
3926         */
3927         if (!is_s2io_card_up(sp))
3928                 return 0;
3929
3930         netif_stop_queue(dev);
3931         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3932         s2io_card_down(sp);
3933
3934         return 0;
3935 }
3936
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when device can't queue the pkt, just the trans_start variable will
 *  not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */

static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct s2io_nic *sp = dev->priv;
        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
        register u64 val64;
        struct TxD *txdp;
        struct TxFIFO_element __iomem *tx_fifo;
        unsigned long flags;
        u16 vlan_tag = 0;
        int vlan_priority = 0;
        struct mac_info *mac_control;
        struct config_param *config;
        int offload_type;
        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

        mac_control = &sp->mac_control;
        config = &sp->config;

        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

        /* A zero-length skb carries no payload; drop it up front */
        if (unlikely(skb->len <= 0)) {
                DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
                dev_kfree_skb_any(skb);
                return 0;
        }

        spin_lock_irqsave(&sp->tx_lock, flags);
        /* Drop packets submitted while the card is going down for reset */
        if (!is_s2io_card_up(sp)) {
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                dev_kfree_skb(skb);
                return 0;
        }

        queue = 0;
        /* Get Fifo number to Transmit based on vlan priority */
        if (sp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_priority = vlan_tag >> 13;
                queue = config->fifo_mapping[vlan_priority];
        }

        /* Locate the next free TxD in the chosen FIFO's circular list */
        put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
        get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
        txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
                list_virt_addr;

        queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
        /* Avoid "put" pointer going beyond "get" pointer */
        if (txdp->Host_Control ||
                   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                netif_stop_queue(dev);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&sp->tx_lock, flags);
                return 0;
        }

        /* Set the LSO / checksum-offload bits requested for this skb */
        offload_type = s2io_offload_type(skb);
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                txdp->Control_1 |= TXD_TCP_LSO_EN;
                txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
        }
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdp->Control_2 |=
                    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
                     TXD_TX_CKO_UDP_EN);
        }
        txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
        txdp->Control_1 |= TXD_LIST_OWN_XENA;
        txdp->Control_2 |= config->tx_intr_type;

        if (sp->vlgrp && vlan_tx_tag_present(skb)) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }

        frg_len = skb->len - skb->data_len;
        if (offload_type == SKB_GSO_UDP) {
                int ufo_size;

                /* UFO: descriptor 0 carries an 8-byte in-band value
                 * (the skb's ip6_frag_id, shifted for endianness) that
                 * is DMA-mapped separately from the payload */
                ufo_size = s2io_udp_mss(skb);
                ufo_size &= ~7;
                txdp->Control_1 |= TXD_UFO_EN;
                txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
                txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
                sp->ufo_in_band_v[put_off] =
                                (u64)skb_shinfo(skb)->ip6_frag_id;
#else
                sp->ufo_in_band_v[put_off] =
                                (u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
                txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                        sp->ufo_in_band_v,
                                        sizeof(u64), PCI_DMA_TODEVICE);
                if((txdp->Buffer_Pointer == 0) ||
                        (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                        goto pci_map_failed;
                txdp++;
        }

        /* Map the linear part of the skb into the (next) TxD */
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
        if((txdp->Buffer_Pointer == 0) ||
                (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                goto pci_map_failed;

        txdp->Host_Control = (unsigned long) skb;
        txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
        if (offload_type == SKB_GSO_UDP)
                txdp->Control_1 |= TXD_UFO_EN;

        frg_cnt = skb_shinfo(skb)->nr_frags;
        /* For fragmented SKB. */
        for (i = 0; i < frg_cnt; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                /* A '0' length fragment will be ignored */
                if (!frag->size)
                        continue;
                txdp++;
                txdp->Buffer_Pointer = (u64) pci_map_page
                    (sp->pdev, frag->page, frag->page_offset,
                     frag->size, PCI_DMA_TODEVICE);
                txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
                if (offload_type == SKB_GSO_UDP)
                        txdp->Control_1 |= TXD_UFO_EN;
        }
        txdp->Control_1 |= TXD_GATHER_CODE_LAST;

        if (offload_type == SKB_GSO_UDP)
                frg_cnt++; /* as Txd0 was used for inband header */

        /* Ring the doorbell: hand the TxD list address and control word
         * to the hardware FIFO */
        tx_fifo = mac_control->tx_FIFO_start[queue];
        val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
        writeq(val64, &tx_fifo->TxDL_Pointer);

        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
                 TX_FIFO_LAST_LIST);
        if (offload_type)
                val64 |= TX_FIFO_SPECIAL_FUNC;

        writeq(val64, &tx_fifo->List_Control);

        /* Order the doorbell MMIO writes before releasing the lock */
        mmiowb();

        /* Advance the circular "put" pointer */
        put_off++;
        if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
                put_off = 0;
        mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

        /* Avoid "put" pointer going beyond "get" pointer */
        if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
                DBG_PRINT(TX_DBG,
                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                          put_off, get_off);
                netif_stop_queue(dev);
        }
        /* NOTE(review): truesize is charged to mem_allocated here —
         * presumably balanced by a mem_freed on Tx completion elsewhere;
         * confirm against tx_intr_handler */
        mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return 0;
pci_map_failed:
        /* DMA mapping failed: count it, stop the queue, drop the skb */
        stats->pci_map_fail_cnt++;
        netif_stop_queue(dev);
        stats->mem_freed += skb->truesize;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&sp->tx_lock, flags);
        return 0;
}
4124
4125 static void
4126 s2io_alarm_handle(unsigned long data)
4127 {
4128         struct s2io_nic *sp = (struct s2io_nic *)data;
4129         struct net_device *dev = sp->dev;
4130
4131         s2io_handle_errors(dev);
4132         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4133 }
4134
/*
 * s2io_chk_rx_buffers - replenish an Rx ring when it runs low
 * @sp: device private structure
 * @rng_n: ring number to check
 *
 * In non-LRO mode the buffer level is checked: at PANIC level (and with
 * the refill tasklet not already active) the ring is refilled inline;
 * at LOW level the refill is deferred to the tasklet.  In LRO mode the
 * ring is simply topped up every call.  Returns -1 only on an inline
 * refill that hit -ENOMEM; 0 otherwise.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
        int rxb_size, level;

        if (!sp->lro) {
                rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
                level = rx_buffer_level(sp, rxb_size, rng_n);

                /* NOTE(review): TASKLET_IN_USE presumably test-and-sets
                 * bit 0 of tasklet_status, which is why both exit paths
                 * below clear_bit() it — confirm against the macro. */
                if ((level == PANIC) && (!TASKLET_IN_USE)) {
                        int ret;
                        DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
                        DBG_PRINT(INTR_DBG, "PANIC levels\n");
                        if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
                                DBG_PRINT(INFO_DBG, "Out of memory in %s",
                                          __FUNCTION__);
                                clear_bit(0, (&sp->tasklet_status));
                                return -1;
                        }
                        clear_bit(0, (&sp->tasklet_status));
                } else if (level == LOW)
                        tasklet_schedule(&sp->task);

        } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
                        /* LRO mode: best-effort refill, failure only logged */
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
        }
        return 0;
}
4163
4164 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4165 {
4166         struct ring_info *ring = (struct ring_info *)dev_id;
4167         struct s2io_nic *sp = ring->nic;
4168
4169         if (!is_s2io_card_up(sp))
4170                 return IRQ_HANDLED;
4171
4172         rx_intr_handler(ring);
4173         s2io_chk_rx_buffers(sp, ring->ring_no);
4174
4175         return IRQ_HANDLED;
4176 }
4177
4178 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4179 {
4180         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4181         struct s2io_nic *sp = fifo->nic;
4182
4183         if (!is_s2io_card_up(sp))
4184                 return IRQ_HANDLED;
4185
4186         tx_intr_handler(fifo);
4187         return IRQ_HANDLED;
4188 }
/*
 * s2io_txpic_intr_handle - handle PIC/GPIO (link state) interrupts
 * @sp: device private structure
 *
 * Reads pic_int_status and, for GPIO interrupts, reconciles the link
 * indications: both up and down set means an unstable link (clear both,
 * unmask both, let the adapter re-evaluate); link-up enables the
 * adapter and its LED and swaps the up/down interrupt masks; link-down
 * reports the loss, swaps the masks back and turns the LED off.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;

        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
                    (val64 & GPIO_INT_REG_LINK_UP)) {
                        /*
                         * This is unstable state so clear both up/down
                         * interrupt and adapter to re-evaluate the link state.
                         */
                        val64 |=  GPIO_INT_REG_LINK_DOWN;
                        val64 |= GPIO_INT_REG_LINK_UP;
                        writeq(val64, &bar0->gpio_int_reg);
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~(GPIO_INT_MASK_LINK_UP |
                                   GPIO_INT_MASK_LINK_DOWN);
                        writeq(val64, &bar0->gpio_int_mask);
                }
                else if (val64 & GPIO_INT_REG_LINK_UP) {
                        /* NOTE(review): this adapter_status read result is
                         * immediately overwritten — presumably a read-to-
                         * flush; confirm before removing */
                        val64 = readq(&bar0->adapter_status);
                                /* Enable Adapter */
                        val64 = readq(&bar0->adapter_control);
                        val64 |= ADAPTER_CNTL_EN;
                        writeq(val64, &bar0->adapter_control);
                        val64 |= ADAPTER_LED_ON;
                        writeq(val64, &bar0->adapter_control);
                        if (!sp->device_enabled_once)
                                sp->device_enabled_once = 1;

                        s2io_link(sp, LINK_UP);
                        /*
                         * unmask link down interrupt and mask link-up
                         * intr
                         */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_DOWN;
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);

                }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
                        val64 = readq(&bar0->adapter_status);
                        s2io_link(sp, LINK_DOWN);
                        /* Link is down so unmask link up interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_UP;
                        val64 |= GPIO_INT_MASK_LINK_DOWN;
                        writeq(val64, &bar0->gpio_int_mask);

                        /* turn off LED */
                        val64 = readq(&bar0->adapter_control);
                        val64 = val64 &(~ADAPTER_LED_ON);
                        writeq(val64, &bar0->adapter_control);
                }
        }
        /* NOTE(review): final readq result is unused — looks like a
         * flushing read; confirm before removing */
        val64 = readq(&bar0->gpio_int_mask);
}
4249
4250 /**
4251  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4252  *  @value: alarm bits
4253  *  @addr: address value
4254  *  @cnt: counter variable
4255  *  Description: Check for alarm and increment the counter
4256  *  Return Value:
4257  *  1 - if alarm bit set
4258  *  0 - if alarm bit is not set
4259  */
4260 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4261                           unsigned long long *cnt)
4262 {
4263         u64 val64;
4264         val64 = readq(addr);
4265         if ( val64 & value ) {
4266                 writeq(val64, addr);
4267                 (*cnt)++;
4268                 return 1;
4269         }
4270         return 0;
4271
4272 }
4273
4274 /**
4275  *  s2io_handle_errors - Xframe error indication handler
4276  *  @nic: device private variable
4277  *  Description: Handle alarms such as loss of link, single or
4278  *  double ECC errors, critical and serious errors.
4279  *  Return Value:
4280  *  NONE
4281  */
4282 static void s2io_handle_errors(void * dev_id)
4283 {
4284         struct net_device *dev = (struct net_device *) dev_id;
4285         struct s2io_nic *sp = dev->priv;
4286         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4287         u64 temp64 = 0,val64=0;
4288         int i = 0;
4289
4290         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4291         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4292
4293         if (!is_s2io_card_up(sp))
4294                 return;
4295
4296         if (pci_channel_offline(sp->pdev))
4297                 return;
4298
4299         memset(&sw_stat->ring_full_cnt, 0,
4300                 sizeof(sw_stat->ring_full_cnt));
4301
4302         /* Handling the XPAK counters update */
4303         if(stats->xpak_timer_count < 72000) {
4304                 /* waiting for an hour */
4305                 stats->xpak_timer_count++;
4306         } else {
4307                 s2io_updt_xpak_counter(dev);
4308                 /* reset the count to zero */
4309                 stats->xpak_timer_count = 0;
4310         }
4311
4312         /* Handling link status change error Intr */
4313         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4314                 val64 = readq(&bar0->mac_rmac_err_reg);
4315                 writeq(val64, &bar0->mac_rmac_err_reg);
4316                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4317                         schedule_work(&sp->set_link_task);
4318         }
4319
4320         /* In case of a serious error, the device will be Reset. */
4321         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4322                                 &sw_stat->serious_err_cnt))
4323                 goto reset;
4324
4325         /* Check for data parity error */
4326         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4327                                 &sw_stat->parity_err_cnt))
4328                 goto reset;
4329
4330         /* Check for ring full counter */
4331         if (sp->device_type == XFRAME_II_DEVICE) {
4332                 val64 = readq(&bar0->ring_bump_counter1);
4333                 for (i=0; i<4; i++) {
4334                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4335                         temp64 >>= 64 - ((i+1)*16);
4336                         sw_stat->ring_full_cnt[i] += temp64;
4337                 }
4338
4339                 val64 = readq(&bar0->ring_bump_counter2);
4340                 for (i=0; i<4; i++) {
4341                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4342                         temp64 >>= 64 - ((i+1)*16);
4343                          sw_stat->ring_full_cnt[i+4] += temp64;
4344                 }
4345         }
4346
4347         val64 = readq(&bar0->txdma_int_status);
4348         /*check for pfc_err*/
4349         if (val64 & TXDMA_PFC_INT) {
4350                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4351                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4352                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4353                                 &sw_stat->pfc_err_cnt))
4354                         goto reset;
4355                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4356                                 &sw_stat->pfc_err_cnt);
4357         }
4358
4359         /*check for tda_err*/
4360         if (val64 & TXDMA_TDA_INT) {
4361                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4362                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4363                                 &sw_stat->tda_err_cnt))
4364                         goto reset;
4365                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4366                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4367         }
4368         /*check for pcc_err*/
4369         if (val64 & TXDMA_PCC_INT) {
4370                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4371                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4372                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4373                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4374                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4375                                 &sw_stat->pcc_err_cnt))
4376                         goto reset;
4377                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4378                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4379         }
4380
4381         /*check for tti_err*/
4382         if (val64 & TXDMA_TTI_INT) {
4383                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4384                                 &sw_stat->tti_err_cnt))
4385                         goto reset;
4386                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4387                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4388         }
4389
4390         /*check for lso_err*/
4391         if (val64 & TXDMA_LSO_INT) {
4392                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4393                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4394                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4395                         goto reset;
4396                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4397                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4398         }
4399
4400         /*check for tpa_err*/
4401         if (val64 & TXDMA_TPA_INT) {
4402                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4403                         &sw_stat->tpa_err_cnt))
4404                         goto reset;
4405                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4406                         &sw_stat->tpa_err_cnt);
4407         }
4408
4409         /*check for sm_err*/
4410         if (val64 & TXDMA_SM_INT) {
4411                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4412                         &sw_stat->sm_err_cnt))
4413                         goto reset;
4414         }
4415
4416         val64 = readq(&bar0->mac_int_status);
4417         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4418                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4419                                 &bar0->mac_tmac_err_reg,
4420                                 &sw_stat->mac_tmac_err_cnt))
4421                         goto reset;
4422                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4423                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4424                                 &bar0->mac_tmac_err_reg,
4425                                 &sw_stat->mac_tmac_err_cnt);
4426         }
4427
4428         val64 = readq(&bar0->xgxs_int_status);
4429         if (val64 & XGXS_INT_STATUS_TXGXS) {
4430                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4431                                 &bar0->xgxs_txgxs_err_reg,
4432                                 &sw_stat->xgxs_txgxs_err_cnt))
4433                         goto reset;
4434                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4435                                 &bar0->xgxs_txgxs_err_reg,
4436                                 &sw_stat->xgxs_txgxs_err_cnt);
4437         }
4438
4439         val64 = readq(&bar0->rxdma_int_status);
4440         if (val64 & RXDMA_INT_RC_INT_M) {
4441                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4442                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4443                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4444                         goto reset;
4445                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4446                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4447                                 &sw_stat->rc_err_cnt);
4448                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4449                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4450                                 &sw_stat->prc_pcix_err_cnt))
4451                         goto reset;
4452                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4453                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4454                                 &sw_stat->prc_pcix_err_cnt);
4455         }
4456
4457         if (val64 & RXDMA_INT_RPA_INT_M) {
4458                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4459                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4460                         goto reset;
4461                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4462                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4463         }
4464
4465         if (val64 & RXDMA_INT_RDA_INT_M) {
4466                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4467                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4468                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4469                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4470                         goto reset;
4471                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4472                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4473                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4474         }
4475
4476         if (val64 & RXDMA_INT_RTI_INT_M) {
4477                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4478                                 &sw_stat->rti_err_cnt))
4479                         goto reset;
4480                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4481                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4482         }
4483
4484         val64 = readq(&bar0->mac_int_status);
4485         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4486                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4487                                 &bar0->mac_rmac_err_reg,
4488                                 &sw_stat->mac_rmac_err_cnt))
4489                         goto reset;
4490                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4491                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4492                                 &sw_stat->mac_rmac_err_cnt);
4493         }
4494
4495         val64 = readq(&bar0->xgxs_int_status);
4496         if (val64 & XGXS_INT_STATUS_RXGXS) {
4497                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4498                                 &bar0->xgxs_rxgxs_err_reg,
4499                                 &sw_stat->xgxs_rxgxs_err_cnt))
4500                         goto reset;
4501         }
4502
4503         val64 = readq(&bar0->mc_int_status);
4504         if(val64 & MC_INT_STATUS_MC_INT) {
4505                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4506                                 &sw_stat->mc_err_cnt))
4507                         goto reset;
4508
4509                 /* Handling Ecc errors */
4510                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4511                         writeq(val64, &bar0->mc_err_reg);
4512                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4513                                 sw_stat->double_ecc_errs++;
4514                                 if (sp->device_type != XFRAME_II_DEVICE) {
4515                                         /*
4516                                          * Reset XframeI only if critical error
4517                                          */
4518                                         if (val64 &
4519                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4520                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4521                                                                 goto reset;
4522                                         }
4523                         } else
4524                                 sw_stat->single_ecc_errs++;
4525                 }
4526         }
4527         return;
4528
4529 reset:
4530         netif_stop_queue(dev);
4531         schedule_work(&sp->rst_timer_task);
4532         sw_stat->soft_reset_cnt++;
4533         return;
4534 }
4535
4536 /**
4537  *  s2io_isr - ISR handler of the device .
4538  *  @irq: the irq of the device.
4539  *  @dev_id: a void pointer to the dev structure of the NIC.
4540  *  Description:  This function is the ISR handler of the device. It
4541  *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
4543  *  recv buffers, if their numbers are below the panic value which is
4544  *  presently set to 25% of the original number of rcv buffers allocated.
4545  *  Return value:
4546  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4547  *   IRQ_NONE: will be returned if interrupt is not from our device
4548  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int i;
        u64 reason = 0;
        struct mac_info *mac_control;
        struct config_param *config;

        /* Pretend we handled any irq's from a disconnected card */
        if (pci_channel_offline(sp->pdev))
                return IRQ_NONE;

        /* Ignore interrupts arriving while the card is going down/up */
        if (!is_s2io_card_up(sp))
                return IRQ_NONE;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /*
         * Identify the cause for interrupt and call the appropriate
         * interrupt handler. Causes for the interrupt could be;
         * 1. Rx of packet.
         * 2. Tx complete.
         * 3. Link down.
         */
        reason = readq(&bar0->general_int_status);

        if (unlikely(reason == S2IO_MINUS_ONE) ) {
                /*
                 * All-ones read from the status register: the device is
                 * not responding sanely. Nothing much can be done. Get out.
                 */
                return IRQ_HANDLED;
        }

        if (reason & (GEN_INTR_RXTRAFFIC |
                GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
        {
                /* Mask all interrupt sources while we service this one */
                writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

                if (config->napi) {
                        if (reason & GEN_INTR_RXTRAFFIC) {
                                /*
                                 * Hand Rx processing to the NAPI poll routine;
                                 * keep Rx masked until the poll completes. If
                                 * a poll is already scheduled, just ack the
                                 * Rx interrupt.
                                 */
                                if (likely(netif_rx_schedule_prep(dev,
                                                        &sp->napi))) {
                                        __netif_rx_schedule(dev, &sp->napi);
                                        writeq(S2IO_MINUS_ONE,
                                               &bar0->rx_traffic_mask);
                                } else
                                        writeq(S2IO_MINUS_ONE,
                                               &bar0->rx_traffic_int);
                        }
                } else {
                        /*
                         * rx_traffic_int reg is an R1 register, writing all 1's
                         * will ensure that the actual interrupt causing bit
                         * get's cleared and hence a read can be avoided.
                         */
                        if (reason & GEN_INTR_RXTRAFFIC)
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

                        for (i = 0; i < config->rx_ring_num; i++)
                                rx_intr_handler(&mac_control->rings[i]);
                }

                /*
                 * tx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                if (reason & GEN_INTR_TXTRAFFIC)
                        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

                for (i = 0; i < config->tx_fifo_num; i++)
                        tx_intr_handler(&mac_control->fifos[i]);

                if (reason & GEN_INTR_TXPIC)
                        s2io_txpic_intr_handle(sp);

                /*
                 * Reallocate the buffers from the interrupt handler itself.
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
                                s2io_chk_rx_buffers(sp, i);
                }
                /* Restore the saved interrupt mask and flush the write */
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);

                return IRQ_HANDLED;

        }
        else if (!reason) {
                /* The interrupt was not raised by us */
                return IRQ_NONE;
        }

        return IRQ_HANDLED;
}
4646
4647 /**
4648  * s2io_updt_stats -
4649  */
4650 static void s2io_updt_stats(struct s2io_nic *sp)
4651 {
4652         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4653         u64 val64;
4654         int cnt = 0;
4655
4656         if (is_s2io_card_up(sp)) {
4657                 /* Apprx 30us on a 133 MHz bus */
4658                 val64 = SET_UPDT_CLICKS(10) |
4659                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4660                 writeq(val64, &bar0->stat_cfg);
4661                 do {
4662                         udelay(100);
4663                         val64 = readq(&bar0->stat_cfg);
4664                         if (!(val64 & s2BIT(0)))
4665                                 break;
4666                         cnt++;
4667                         if (cnt == 5)
4668                                 break; /* Updt failed */
4669                 } while(1);
4670         }
4671 }
4672
4673 /**
4674  *  s2io_get_stats - Updates the device statistics structure.
4675  *  @dev : pointer to the device structure.
4676  *  Description:
4677  *  This function updates the device statistics structure in the s2io_nic
4678  *  structure and returns a pointer to the same.
4679  *  Return value:
4680  *  pointer to the updated net_device_stats structure.
4681  */
4682
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
        struct s2io_nic *sp = dev->priv;
        struct mac_info *mac_control;
        struct config_param *config;


        mac_control = &sp->mac_control;
        config = &sp->config;

        /* Configure Stats for immediate updt */
        s2io_updt_stats(sp);

        /*
         * Copy the relevant hardware MAC counters into the generic
         * net_device_stats structure.
         * NOTE(review): the tmac/mcst counters are read with le32_to_cpu
         * while the drop/long-frame counters use le64_to_cpu; this mirrors
         * the stats block field widths, but confirm that no overflow
         * (upper-32-bit) counters are being dropped for the 32-bit fields.
         */
        sp->stats.tx_packets =
                le32_to_cpu(mac_control->stats_info->tmac_frms);
        sp->stats.tx_errors =
                le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
        sp->stats.rx_errors =
                le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
        sp->stats.multicast =
                le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
        sp->stats.rx_length_errors =
                le64_to_cpu(mac_control->stats_info->rmac_long_frms);

        return (&sp->stats);
}
4709
4710 /**
4711  *  s2io_set_multicast - entry point for multicast address enable/disable.
4712  *  @dev : pointer to the device structure
4713  *  Description:
4714  *  This function is a driver entry point which gets called by the kernel
4715  *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
4717  *  determine, if multicast address must be enabled or if promiscuous mode
4718  *  is to be disabled etc.
4719  *  Return value:
4720  *  void.
4721  */
4722
static void s2io_set_multicast(struct net_device *dev)
{
        int i, j, prev_cnt;
        struct dev_mc_list *mclist;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        /* multi_mac/mask program the "accept all multicast" CAM entry;
         * dis_addr is the all-ones pattern used to disable an entry. */
        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
            0xfeffffffffffULL;
        u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
        void __iomem *add;

        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
                /*  Enable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 1;
                sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
                /*  Disable all Multicast addresses */
                /* NOTE(review): this branch fires while IFF_ALLMULTI is
                 * still SET (and m_cast_flg set), which would toggle
                 * all-multi off on a repeat call; verify against the
                 * upstream source whether the condition should be
                 * !(dev->flags & IFF_ALLMULTI). */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 0;
                sp->all_multi_pos = 0;
        }

        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
                /*  Put the NIC into promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 |= MAC_CFG_RMAC_PROM_ENABLE;

                /* mac_cfg is key-protected: each 32-bit half must be
                 * unlocked with the config key before being written. */
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* In promiscuous mode also stop stripping VLAN tags,
                 * unless the module parameter forces stripping on. */
                if (vlan_tag_strip != 1) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 0;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 1;
                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
                          dev->name);
        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
                /*  Remove the NIC from promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* Restore VLAN tag stripping unless disabled by module
                 * parameter. */
                if (vlan_tag_strip != 0) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 1;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 0;
                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
                          dev->name);
        }

        /*  Update individual M_CAST address list */
        if ((!sp->m_cast_flg) && dev->mc_count) {
                /* Bail out if the list exceeds the CAM capacity */
                if (dev->mc_count >
                    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
                        DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "can be added, please enable ");
                        DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
                        return;
                }

                prev_cnt = sp->mc_addr_count;
                sp->mc_addr_count = dev->mc_count;

                /* Clear out the previous list of Mc in the H/W. */
                for (i = 0; i < prev_cnt; i++) {
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (MAC_MC_ADDR_START_OFFSET + i);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }

                /* Create the new Rx filter list and update the same in H/W. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
                               ETH_ALEN);
                        /* Pack the 6-byte address big-endian into the low
                         * 48 bits of mac_addr. */
                        mac_addr = 0;
                        for (j = 0; j < ETH_ALEN; j++) {
                                mac_addr |= mclist->dmi_addr[j];
                                mac_addr <<= 8;
                        }
                        mac_addr >>= 8;
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (i + MAC_MC_ADDR_START_OFFSET);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }
        }
}
4886
4887 /* add unicast MAC address to CAM */
4888 static int do_s2io_add_unicast(struct s2io_nic *sp, u64 addr, int off)
4889 {
4890         u64 val64;
4891         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4892
4893         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
4894                 &bar0->rmac_addr_data0_mem);
4895
4896         val64 =
4897                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4898                 RMAC_ADDR_CMD_MEM_OFFSET(off);
4899         writeq(val64, &bar0->rmac_addr_cmd_mem);
4900
4901         /* Wait till command completes */
4902         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4903                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4904                 S2IO_BIT_RESET)) {
4905                 DBG_PRINT(INFO_DBG, "add_mac_addr failed\n");
4906                 return FAILURE;
4907         }
4908         return SUCCESS;
4909 }
4910
4911 /**
4912  * s2io_set_mac_addr driver entry point
4913  */
4914 static int s2io_set_mac_addr(struct net_device *dev, void *p)
4915 {
4916         struct sockaddr *addr = p;
4917
4918         if (!is_valid_ether_addr(addr->sa_data))
4919                 return -EINVAL;
4920
4921         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4922
4923         /* store the MAC address in CAM */
4924         return (do_s2io_prog_unicast(dev, dev->dev_addr));
4925 }
4926
4927 /**
4928  *  do_s2io_prog_unicast - Programs the Xframe mac address
4929  *  @dev : pointer to the device structure.
4930  *  @addr: a uchar pointer to the new mac address which is to be set.
4931  *  Description : This procedure will program the Xframe to receive
4932  *  frames with new Mac Address
4933  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4934  *  as defined in errno.h file on failure.
4935  */
4936 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
4937 {
4938         struct s2io_nic *sp = dev->priv;
4939         register u64 mac_addr = 0, perm_addr = 0;
4940         int i;
4941
4942         /*
4943         * Set the new MAC address as the new unicast filter and reflect this
4944         * change on the device address registered with the OS. It will be
4945         * at offset 0.
4946         */
4947         for (i = 0; i < ETH_ALEN; i++) {
4948                 mac_addr <<= 8;
4949                 mac_addr |= addr[i];
4950                 perm_addr <<= 8;
4951                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
4952         }
4953
4954         /* check if the dev_addr is different than perm_addr */
4955         if (mac_addr == perm_addr)
4956                 return SUCCESS;
4957
4958         /* Update the internal structure with this new mac address */
4959         do_s2io_copy_mac_addr(sp, 0, mac_addr);
4960         return (do_s2io_add_unicast(sp, mac_addr, 0));
4961 }
4962
4963 /**
4964  * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
4966  * @info: pointer to the structure with parameters given by ethtool to set
4967  * link information.
4968  * Description:
4969  * The function sets different link parameters provided by the user onto
4970  * the NIC.
4971  * Return value:
4972  * 0 on success.
4973 */
4974
4975 static int s2io_ethtool_sset(struct net_device *dev,
4976                              struct ethtool_cmd *info)
4977 {
4978         struct s2io_nic *sp = dev->priv;
4979         if ((info->autoneg == AUTONEG_ENABLE) ||
4980             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4981                 return -EINVAL;
4982         else {
4983                 s2io_close(sp->dev);
4984                 s2io_open(sp->dev);
4985         }
4986
4987         return 0;
4988 }
4989
4990 /**
 * s2io_ethtool_gset - Return link specific information.
4992  * @sp : private member of the device structure, pointer to the
4993  *      s2io_nic structure.
4994  * @info : pointer to the structure with parameters given by ethtool
4995  * to return link information.
4996  * Description:
4997  * Returns link specific information like speed, duplex etc.. to ethtool.
4998  * Return value :
4999  * return 0 on success.
5000  */
5001
5002 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5003 {
5004         struct s2io_nic *sp = dev->priv;
5005         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5006         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5007         info->port = PORT_FIBRE;
5008
5009         /* info->transceiver */
5010         info->transceiver = XCVR_EXTERNAL;
5011
5012         if (netif_carrier_ok(sp->dev)) {
5013                 info->speed = 10000;
5014                 info->duplex = DUPLEX_FULL;
5015         } else {
5016                 info->speed = -1;
5017                 info->duplex = -1;
5018         }
5019
5020         info->autoneg = AUTONEG_DISABLE;
5021         return 0;
5022 }
5023
5024 /**
5025  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5026  * @sp : private member of the device structure, which is a pointer to the
5027  * s2io_nic structure.
5028  * @info : pointer to the structure with parameters given by ethtool to
5029  * return driver information.
5030  * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
5032  * Return value:
5033  *  void
5034  */
5035
5036 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5037                                   struct ethtool_drvinfo *info)
5038 {
5039         struct s2io_nic *sp = dev->priv;
5040
5041         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5042         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5043         strncpy(info->fw_version, "", sizeof(info->fw_version));
5044         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5045         info->regdump_len = XENA_REG_SPACE;
5046         info->eedump_len = XENA_EEPROM_SPACE;
5047 }
5048
5049 /**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5051  *  @sp: private member of the device structure, which is a pointer to the
5052  *  s2io_nic structure.
5053  *  @regs : pointer to the structure with parameters given by ethtool for
5054  *  dumping the registers.
 *  @space: The input argument into which all the registers are dumped.
5056  *  Description:
5057  *  Dumps the entire register space of xFrame NIC into the user given
5058  *  buffer area.
5059  * Return value :
5060  * void .
5061 */
5062
5063 static void s2io_ethtool_gregs(struct net_device *dev,
5064                                struct ethtool_regs *regs, void *space)
5065 {
5066         int i;
5067         u64 reg;
5068         u8 *reg_space = (u8 *) space;
5069         struct s2io_nic *sp = dev->priv;
5070
5071         regs->len = XENA_REG_SPACE;
5072         regs->version = sp->pdev->subsystem_device;
5073
5074         for (i = 0; i < regs->len; i += 8) {
5075                 reg = readq(sp->bar0 + i);
5076                 memcpy((reg_space + i), &reg, 8);
5077         }
5078 }
5079
5080 /**
5081  *  s2io_phy_id  - timer function that alternates adapter LED.
5082  *  @data : address of the private member of the device structure, which
5083  *  is a pointer to the s2io_nic structure, provided as an u32.
5084  * Description: This is actually the timer function that alternates the
5085  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5087  *  once every second.
5088 */
5089 static void s2io_phy_id(unsigned long data)
5090 {
5091         struct s2io_nic *sp = (struct s2io_nic *) data;
5092         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5093         u64 val64 = 0;
5094         u16 subid;
5095
5096         subid = sp->pdev->subsystem_device;
5097         if ((sp->device_type == XFRAME_II_DEVICE) ||
5098                    ((subid & 0xFF) >= 0x07)) {
5099                 val64 = readq(&bar0->gpio_control);
5100                 val64 ^= GPIO_CTRL_GPIO_0;
5101                 writeq(val64, &bar0->gpio_control);
5102         } else {
5103                 val64 = readq(&bar0->adapter_control);
5104                 val64 ^= ADAPTER_LED_ON;
5105                 writeq(val64, &bar0->adapter_control);
5106         }
5107
5108         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5109 }
5110
5111 /**
5112  * s2io_ethtool_idnic - To physically identify the nic on the system.
5113  * @sp : private member of the device structure, which is a pointer to the
5114  * s2io_nic structure.
5115  * @id : pointer to the structure with identification parameters given by
5116  * ethtool.
5117  * Description: Used to physically identify the NIC on the system.
5118  * The Link LED will blink for a time specified by the user for
5119  * identification.
5120  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5121  * identification is possible only if it's link is up.
5122  * Return value:
5123  * int , returns 0 on success
5124  */
5125
5126 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5127 {
5128         u64 val64 = 0, last_gpio_ctrl_val;
5129         struct s2io_nic *sp = dev->priv;
5130         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5131         u16 subid;
5132
5133         subid = sp->pdev->subsystem_device;
5134         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5135         if ((sp->device_type == XFRAME_I_DEVICE) &&
5136                 ((subid & 0xFF) < 0x07)) {
5137                 val64 = readq(&bar0->adapter_control);
5138                 if (!(val64 & ADAPTER_CNTL_EN)) {
5139                         printk(KERN_ERR
5140                                "Adapter Link down, cannot blink LED\n");
5141                         return -EFAULT;
5142                 }
5143         }
5144         if (sp->id_timer.function == NULL) {
5145                 init_timer(&sp->id_timer);
5146                 sp->id_timer.function = s2io_phy_id;
5147                 sp->id_timer.data = (unsigned long) sp;
5148         }
5149         mod_timer(&sp->id_timer, jiffies);
5150         if (data)
5151                 msleep_interruptible(data * HZ);
5152         else
5153                 msleep_interruptible(MAX_FLICKER_TIME);
5154         del_timer_sync(&sp->id_timer);
5155
5156         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5157                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5158                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5159         }
5160
5161         return 0;
5162 }
5163
5164 static void s2io_ethtool_gringparam(struct net_device *dev,
5165                                     struct ethtool_ringparam *ering)
5166 {
5167         struct s2io_nic *sp = dev->priv;
5168         int i,tx_desc_count=0,rx_desc_count=0;
5169
5170         if (sp->rxd_mode == RXD_MODE_1)
5171                 ering->rx_max_pending = MAX_RX_DESC_1;
5172         else if (sp->rxd_mode == RXD_MODE_3B)
5173                 ering->rx_max_pending = MAX_RX_DESC_2;
5174
5175         ering->tx_max_pending = MAX_TX_DESC;
5176         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5177                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5178
5179         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5180         ering->tx_pending = tx_desc_count;
5181         rx_desc_count = 0;
5182         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5183                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5184
5185         ering->rx_pending = rx_desc_count;
5186
5187         ering->rx_mini_max_pending = 0;
5188         ering->rx_mini_pending = 0;
5189         if(sp->rxd_mode == RXD_MODE_1)
5190                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5191         else if (sp->rxd_mode == RXD_MODE_3B)
5192                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5193         ering->rx_jumbo_pending = rx_desc_count;
5194 }
5195
5196 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5198  * @sp : private member of the device structure, which is a pointer to the
5199  *      s2io_nic structure.
5200  * @ep : pointer to the structure with pause parameters given by ethtool.
5201  * Description:
5202  * Returns the Pause frame generation and reception capability of the NIC.
5203  * Return value:
5204  *  void
5205  */
5206 static void s2io_ethtool_getpause_data(struct net_device *dev,
5207                                        struct ethtool_pauseparam *ep)
5208 {
5209         u64 val64;
5210         struct s2io_nic *sp = dev->priv;
5211         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5212
5213         val64 = readq(&bar0->rmac_pause_cfg);
5214         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5215                 ep->tx_pause = TRUE;
5216         if (val64 & RMAC_PAUSE_RX_ENABLE)
5217                 ep->rx_pause = TRUE;
5218         ep->autoneg = FALSE;
5219 }
5220
5221 /**
5222  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5223  * @sp : private member of the device structure, which is a pointer to the
5224  *      s2io_nic structure.
5225  * @ep : pointer to the structure with pause parameters given by ethtool.
5226  * Description:
5227  * It can be used to set or reset Pause frame generation or reception
5228  * support of the NIC.
5229  * Return value:
5230  * int, returns 0 on Success
5231  */
5232
5233 static int s2io_ethtool_setpause_data(struct net_device *dev,
5234                                struct ethtool_pauseparam *ep)
5235 {
5236         u64 val64;
5237         struct s2io_nic *sp = dev->priv;
5238         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5239
5240         val64 = readq(&bar0->rmac_pause_cfg);
5241         if (ep->tx_pause)
5242                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5243         else
5244                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5245         if (ep->rx_pause)
5246                 val64 |= RMAC_PAUSE_RX_ENABLE;
5247         else
5248                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5249         writeq(val64, &bar0->rmac_pause_cfg);
5250         return 0;
5251 }
5252
5253 /**
5254  * read_eeprom - reads 4 bytes of data from user given offset.
5255  * @sp : private member of the device structure, which is a pointer to the
5256  *      s2io_nic structure.
5257  * @off : offset at which the data must be written
5258  * @data : Its an output parameter where the data read at the given
5259  *      offset is stored.
5260  * Description:
5261  * Will read 4 bytes of data from the user given offset and return the
5262  * read data.
5263  * NOTE: Will allow to read only part of the EEPROM visible through the
5264  *   I2C bus.
5265  * Return value:
5266  *  -1 on failure and 0 on success.
5267  */
5268
#define S2IO_DEV_ID             5
/* Reads 4 bytes of EEPROM at @off into @data. Xframe I goes through the
 * I2C interface, Xframe II through SPI; each request is polled up to
 * 5 times at 50 ms intervals. Returns 0 on success, non-zero otherwise.
 */
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
        int ret = -1;
        u32 exit_cnt = 0;
        u64 val64;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        if (sp->device_type == XFRAME_I_DEVICE) {
                /* Kick off a 4-byte I2C read of the serial EEPROM */
                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
                    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
                    I2C_CONTROL_CNTL_START;
                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

                /* Poll for transaction completion */
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->i2c_control);
                        if (I2C_CONTROL_CNTL_END(val64)) {
                                *data = I2C_CONTROL_GET_DATA(val64);
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }

        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Program the SPI read command, then raise the request bit
                 * in a second write to trigger the transaction.
                 */
                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
                        SPI_CONTROL_BYTECNT(0x3) |
                        SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                val64 |= SPI_CONTROL_REQ;
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->spi_control);
                        if (val64 & SPI_CONTROL_NACK) {
                                /* Device rejected the request */
                                ret = 1;
                                break;
                        } else if (val64 & SPI_CONTROL_DONE) {
                                /* Only the low 3 bytes carry EEPROM data */
                                *data = readq(&bar0->spi_data);
                                *data &= 0xffffff;
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }
        return ret;
}
5319
5320 /**
5321  *  write_eeprom - actually writes the relevant part of the data value.
5322  *  @sp : private member of the device structure, which is a pointer to the
5323  *       s2io_nic structure.
5324  *  @off : offset at which the data must be written
5325  *  @data : The data that is to be written
5326  *  @cnt : Number of bytes of the data that are actually to be written into
5327  *  the Eeprom. (max of 3)
5328  * Description:
5329  *  Actually writes the relevant part of the data value into the Eeprom
5330  *  through the I2C bus.
5331  * Return value:
5332  *  0 on success, -1 on failure.
5333  */
5334
/* Writes the low @cnt bytes of @data to the EEPROM at @off (I2C on
 * Xframe I, SPI on Xframe II), polling up to 5 times at 50 ms intervals.
 * Returns 0 on success, non-zero on failure/NACK.
 */
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
        int exit_cnt = 0, ret = -1;
        u64 val64;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        if (sp->device_type == XFRAME_I_DEVICE) {
                /* Issue the I2C write and poll for completion */
                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
                    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
                    I2C_CONTROL_CNTL_START;
                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

                while (exit_cnt < 5) {
                        val64 = readq(&bar0->i2c_control);
                        if (I2C_CONTROL_CNTL_END(val64)) {
                                /* Success only if the device did not NACK */
                                if (!(val64 & I2C_CONTROL_NACK))
                                        ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }

        if (sp->device_type == XFRAME_II_DEVICE) {
                /* A byte count of 8 is encoded as 0 in the SPI control word */
                int write_cnt = (cnt == 8) ? 0 : cnt;
                writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

                /* Program the SPI write command, then raise the request bit */
                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
                        SPI_CONTROL_BYTECNT(write_cnt) |
                        SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                val64 |= SPI_CONTROL_REQ;
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->spi_control);
                        if (val64 & SPI_CONTROL_NACK) {
                                /* Device rejected the write */
                                ret = 1;
                                break;
                        } else if (val64 & SPI_CONTROL_DONE) {
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }
        return ret;
}
5384 static void s2io_vpd_read(struct s2io_nic *nic)
5385 {
5386         u8 *vpd_data;
5387         u8 data;
5388         int i=0, cnt, fail = 0;
5389         int vpd_addr = 0x80;
5390
5391         if (nic->device_type == XFRAME_II_DEVICE) {
5392                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5393                 vpd_addr = 0x80;
5394         }
5395         else {
5396                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5397                 vpd_addr = 0x50;
5398         }
5399         strcpy(nic->serial_num, "NOT AVAILABLE");
5400
5401         vpd_data = kmalloc(256, GFP_KERNEL);
5402         if (!vpd_data) {
5403                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5404                 return;
5405         }
5406         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5407
5408         for (i = 0; i < 256; i +=4 ) {
5409                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5410                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5411                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5412                 for (cnt = 0; cnt <5; cnt++) {
5413                         msleep(2);
5414                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5415                         if (data == 0x80)
5416                                 break;
5417                 }
5418                 if (cnt >= 5) {
5419                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5420                         fail = 1;
5421                         break;
5422                 }
5423                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5424                                       (u32 *)&vpd_data[i]);
5425         }
5426
5427         if(!fail) {
5428                 /* read serial number of adapter */
5429                 for (cnt = 0; cnt < 256; cnt++) {
5430                 if ((vpd_data[cnt] == 'S') &&
5431                         (vpd_data[cnt+1] == 'N') &&
5432                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5433                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5434                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5435                                         vpd_data[cnt+2]);
5436                                 break;
5437                         }
5438                 }
5439         }
5440
5441         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5442                 memset(nic->product_name, 0, vpd_data[1]);
5443                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5444         }
5445         kfree(vpd_data);
5446         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5447 }
5448
5449 /**
5450  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
5452  *  @eeprom : pointer to the user level structure provided by ethtool,
5453  *  containing all relevant information.
5454  *  @data_buf : user defined value to be written into Eeprom.
5455  *  Description: Reads the values stored in the Eeprom at given offset
5456  *  for a given length. Stores these values int the input argument data
5457  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5458  *  Return value:
5459  *  int  0 on success
5460  */
5461
5462 static int s2io_ethtool_geeprom(struct net_device *dev,
5463                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5464 {
5465         u32 i, valid;
5466         u64 data;
5467         struct s2io_nic *sp = dev->priv;
5468
5469         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5470
5471         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5472                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5473
5474         for (i = 0; i < eeprom->len; i += 4) {
5475                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5476                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5477                         return -EFAULT;
5478                 }
5479                 valid = INV(data);
5480                 memcpy((data_buf + i), &valid, 4);
5481         }
5482         return 0;
5483 }
5484
5485 /**
5486  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5487  *  @sp : private member of the device structure, which is a pointer to the
5488  *  s2io_nic structure.
5489  *  @eeprom : pointer to the user level structure provided by ethtool,
5490  *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
5492  *  Description:
5493  *  Tries to write the user provided value in the Eeprom, at the offset
5494  *  given by the user.
5495  *  Return value:
5496  *  0 on success, -EFAULT on failure.
5497  */
5498
5499 static int s2io_ethtool_seeprom(struct net_device *dev,
5500                                 struct ethtool_eeprom *eeprom,
5501                                 u8 * data_buf)
5502 {
5503         int len = eeprom->len, cnt = 0;
5504         u64 valid = 0, data;
5505         struct s2io_nic *sp = dev->priv;
5506
5507         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5508                 DBG_PRINT(ERR_DBG,
5509                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5510                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5511                           eeprom->magic);
5512                 return -EFAULT;
5513         }
5514
5515         while (len) {
5516                 data = (u32) data_buf[cnt] & 0x000000FF;
5517                 if (data) {
5518                         valid = (u32) (data << 24);
5519                 } else
5520                         valid = data;
5521
5522                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5523                         DBG_PRINT(ERR_DBG,
5524                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5525                         DBG_PRINT(ERR_DBG,
5526                                   "write into the specified offset\n");
5527                         return -EFAULT;
5528                 }
5529                 cnt++;
5530                 len--;
5531         }
5532
5533         return 0;
5534 }
5535
5536 /**
5537  * s2io_register_test - reads and writes into all clock domains.
5538  * @sp : private member of the device structure, which is a pointer to the
5539  * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted
 * by the driver.
5542  * Description:
5543  * Read and write into all clock domains. The NIC has 3 clock domains,
5544  * see that registers in all the three regions are accessible.
5545  * Return value:
5546  * 0 on success.
5547  */
5548
5549 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5550 {
5551         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5552         u64 val64 = 0, exp_val;
5553         int fail = 0;
5554
5555         val64 = readq(&bar0->pif_rd_swapper_fb);
5556         if (val64 != 0x123456789abcdefULL) {
5557                 fail = 1;
5558                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5559         }
5560
5561         val64 = readq(&bar0->rmac_pause_cfg);
5562         if (val64 != 0xc000ffff00000000ULL) {
5563                 fail = 1;
5564                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5565         }
5566
5567         val64 = readq(&bar0->rx_queue_cfg);
5568         if (sp->device_type == XFRAME_II_DEVICE)
5569                 exp_val = 0x0404040404040404ULL;
5570         else
5571                 exp_val = 0x0808080808080808ULL;
5572         if (val64 != exp_val) {
5573                 fail = 1;
5574                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5575         }
5576
5577         val64 = readq(&bar0->xgxs_efifo_cfg);
5578         if (val64 != 0x000000001923141EULL) {
5579                 fail = 1;
5580                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5581         }
5582
5583         val64 = 0x5A5A5A5A5A5A5A5AULL;
5584         writeq(val64, &bar0->xmsi_data);
5585         val64 = readq(&bar0->xmsi_data);
5586         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5587                 fail = 1;
5588                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5589         }
5590
5591         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5592         writeq(val64, &bar0->xmsi_data);
5593         val64 = readq(&bar0->xmsi_data);
5594         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5595                 fail = 1;
5596                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5597         }
5598
5599         *data = fail;
5600         return fail;
5601 }
5602
5603 /**
5604  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5605  * @sp : private member of the device structure, which is a pointer to the
5606  * s2io_nic structure.
5607  * @data:variable that returns the result of each of the test conducted by
5608  * the driver.
5609  * Description:
5610  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5611  * register.
5612  * Return value:
5613  * 0 on success.
5614  */
5615
static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
{
        int fail = 0;
        u64 ret_data, org_4F0, org_7F0;
        u8 saved_4F0 = 0, saved_7F0 = 0;
        struct net_device *dev = sp->dev;

        /* Test Write Error at offset 0 */
        /* Note that SPI interface allows write access to all areas
         * of EEPROM. Hence doing all negative testing only for Xframe I.
         * (A successful write to a protected offset counts as failure.)
         */
        if (sp->device_type == XFRAME_I_DEVICE)
                if (!write_eeprom(sp, 0, 0, 3))
                        fail = 1;

        /* Save current values at offsets 0x4F0 and 0x7F0 so they can be
         * restored after the destructive write tests below.
         */
        if (!read_eeprom(sp, 0x4F0, &org_4F0))
                saved_4F0 = 1;
        if (!read_eeprom(sp, 0x7F0, &org_7F0))
                saved_7F0 = 1;

        /* Test Write at offset 4f0 */
        if (write_eeprom(sp, 0x4F0, 0x012345, 3))
                fail = 1;
        if (read_eeprom(sp, 0x4F0, &ret_data))
                fail = 1;

        /* Verify the readback matches what was written */
        if (ret_data != 0x012345) {
                DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
                        "Data written %llx Data read %llx\n",
                        dev->name, (unsigned long long)0x12345,
                        (unsigned long long)ret_data);
                fail = 1;
        }

        /* Reset the EEPROM data go FFFF */
        write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

        /* Test Write Request Error at offset 0x7c */
        if (sp->device_type == XFRAME_I_DEVICE)
                if (!write_eeprom(sp, 0x07C, 0, 3))
                        fail = 1;

        /* Test Write Request at offset 0x7f0 */
        if (write_eeprom(sp, 0x7F0, 0x012345, 3))
                fail = 1;
        if (read_eeprom(sp, 0x7F0, &ret_data))
                fail = 1;

        if (ret_data != 0x012345) {
                DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
                        "Data written %llx Data read %llx\n",
                        dev->name, (unsigned long long)0x12345,
                        (unsigned long long)ret_data);
                fail = 1;
        }

        /* Reset the EEPROM data go FFFF */
        write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

        /* Negative tests: these protected offsets must refuse writes on
         * Xframe I, so a *successful* write is a failure.
         */
        if (sp->device_type == XFRAME_I_DEVICE) {
                /* Test Write Error at offset 0x80 */
                if (!write_eeprom(sp, 0x080, 0, 3))
                        fail = 1;

                /* Test Write Error at offset 0xfc */
                if (!write_eeprom(sp, 0x0FC, 0, 3))
                        fail = 1;

                /* Test Write Error at offset 0x100 */
                if (!write_eeprom(sp, 0x100, 0, 3))
                        fail = 1;

                /* Test Write Error at offset 4ec */
                if (!write_eeprom(sp, 0x4EC, 0, 3))
                        fail = 1;
        }

        /* Restore values at offsets 0x4F0 and 0x7F0 */
        if (saved_4F0)
                write_eeprom(sp, 0x4F0, org_4F0, 3);
        if (saved_7F0)
                write_eeprom(sp, 0x7F0, org_7F0, 3);

        *data = fail;
        return fail;
}
5703
5704 /**
5705  * s2io_bist_test - invokes the MemBist test of the card .
5706  * @sp : private member of the device structure, which is a pointer to the
5707  * s2io_nic structure.
5708  * @data:variable that returns the result of each of the test conducted by
5709  * the driver.
5710  * Description:
5711  * This invokes the MemBist test of the card. We give around
5712  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5714  * Return value:
5715  * 0 on success and -1 on failure.
5716  */
5717
5718 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5719 {
5720         u8 bist = 0;
5721         int cnt = 0, ret = -1;
5722
5723         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5724         bist |= PCI_BIST_START;
5725         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5726
5727         while (cnt < 20) {
5728                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5729                 if (!(bist & PCI_BIST_START)) {
5730                         *data = (bist & PCI_BIST_CODE_MASK);
5731                         ret = 0;
5732                         break;
5733                 }
5734                 msleep(100);
5735                 cnt++;
5736         }
5737
5738         return ret;
5739 }
5740
5741 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5744  * s2io_nic structure.
5745  * @data: variable that returns the result of each of the test conducted by
5746  * the driver.
5747  * Description:
5748  * The function verifies the link state of the NIC and updates the input
5749  * argument 'data' appropriately.
5750  * Return value:
5751  * 0 on success.
5752  */
5753
5754 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5755 {
5756         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5757         u64 val64;
5758
5759         val64 = readq(&bar0->adapter_status);
5760         if(!(LINK_IS_UP(val64)))
5761                 *data = 1;
5762         else
5763                 *data = 0;
5764
5765         return *data;
5766 }
5767
5768 /**
5769  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
5773  * conducted by the driver.
5774  * Description:
5775  *  This is one of the offline test that tests the read and write
5776  *  access to the RldRam chip on the NIC.
5777  * Return value:
5778  *  0 on success.
5779  */
5780
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        int cnt, iteration = 0, test_fail = 0;

        /* Disable ECC while the memory is exercised with test patterns */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /* Put the RLDRAM controller into test mode */
        val64 = readq(&bar0->mc_rldram_test_ctrl);
        val64 |= MC_RLDRAM_TEST_MODE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        val64 |= MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        /* Two passes: the original patterns, then their bit-inverse
         * (upper 48 bits only) to toggle every tested cell both ways.
         */
        while (iteration < 2) {
                val64 = 0x55555555aaaa0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d0);

                val64 = 0xaaaa5a5555550000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d1);

                val64 = 0x55aaaaaaaa5a0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d2);

                val64 = (u64) (0x0000003ffffe0100ULL);
                writeq(val64, &bar0->mc_rldram_test_add);

                /* Start the write phase and poll for completion (5 * 200ms) */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
                        MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(200);
                }

                /* Timed out waiting for the write phase */
                if (cnt == 5)
                        break;

                /* Start the read-back phase and poll again (5 * 500ms) */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(500);
                }

                /* Timed out waiting for the read-back phase */
                if (cnt == 5)
                        break;

                val64 = readq(&bar0->mc_rldram_test_ctrl);
                if (!(val64 & MC_RLDRAM_TEST_PASS))
                        test_fail = 1;

                iteration++;
        }

        *data = test_fail;

        /* Bring the adapter out of test mode */
        SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

        return test_fail;
}
5865
5866 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5868  *  @sp : private member of the device structure, which is a pointer to the
5869  *  s2io_nic structure.
5870  *  @ethtest : pointer to a ethtool command specific structure that will be
5871  *  returned to the user.
5872  *  @data : variable that returns the result of each of the test
5873  * conducted by the driver.
5874  * Description:
5875  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5876  *  the health of the card.
5877  * Return value:
5878  *  void
5879  */
5880
5881 static void s2io_ethtool_test(struct net_device *dev,
5882                               struct ethtool_test *ethtest,
5883                               uint64_t * data)
5884 {
5885         struct s2io_nic *sp = dev->priv;
5886         int orig_state = netif_running(sp->dev);
5887
5888         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5889                 /* Offline Tests. */
5890                 if (orig_state)
5891                         s2io_close(sp->dev);
5892
5893                 if (s2io_register_test(sp, &data[0]))
5894                         ethtest->flags |= ETH_TEST_FL_FAILED;
5895
5896                 s2io_reset(sp);
5897
5898                 if (s2io_rldram_test(sp, &data[3]))
5899                         ethtest->flags |= ETH_TEST_FL_FAILED;
5900
5901                 s2io_reset(sp);
5902
5903                 if (s2io_eeprom_test(sp, &data[1]))
5904                         ethtest->flags |= ETH_TEST_FL_FAILED;
5905
5906                 if (s2io_bist_test(sp, &data[4]))
5907                         ethtest->flags |= ETH_TEST_FL_FAILED;
5908
5909                 if (orig_state)
5910                         s2io_open(sp->dev);
5911
5912                 data[2] = 0;
5913         } else {
5914                 /* Online Tests. */
5915                 if (!orig_state) {
5916                         DBG_PRINT(ERR_DBG,
5917                                   "%s: is not up, cannot run test\n",
5918                                   dev->name);
5919                         data[0] = -1;
5920                         data[1] = -1;
5921                         data[2] = -1;
5922                         data[3] = -1;
5923                         data[4] = -1;
5924                 }
5925
5926                 if (s2io_link_test(sp, &data[2]))
5927                         ethtest->flags |= ETH_TEST_FL_FAILED;
5928
5929                 data[0] = 0;
5930                 data[1] = 0;
5931                 data[3] = 0;
5932                 data[4] = 0;
5933         }
5934 }
5935
/*
 * s2io_get_ethtool_stats - fill the ethtool statistics array.
 *
 * IMPORTANT: the order of the tmp_stats[i++] assignments below must match
 * ethtool_xena_stats_keys, then (XframeII only) ethtool_enhanced_stats_keys,
 * then ethtool_driver_stats_keys, exactly as emitted by
 * s2io_ethtool_get_strings().  Do not reorder or insert entries in only
 * one of the two places.
 *
 * Many hardware counters are kept by the NIC as a 32-bit low half plus a
 * 32-bit overflow register; those are recombined here into one u64 as
 * (oflow << 32) | low.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats,
                                   u64 * tmp_stats)
{
        int i = 0, k;
        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Refresh the DMA'd hardware statistics block before reading it. */
        s2io_updt_stats(sp);
        /* TMAC (transmit MAC) hardware counters. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
                le32_to_cpu(stat_info->tmac_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_bcst_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_ttl_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_ucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_nucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_any_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_vld_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_icmp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_rst_tcp);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_udp);
        /* RMAC (receive MAC) hardware counters. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_bcst_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ttl_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
                << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
                 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_discarded_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
                 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_usized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_osized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_frag_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_jabber_frms);
        /* Received-frame size histogram. */
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ip);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_icmp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_err_drp_udp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
        /* Per-receive-queue frame and queue-full counters. */
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_pause_cnt);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_accepted_ip);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
        /* PCI/PCI-X bus transaction counters. */
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

        /* Enhanced statistics exist only for Hercules */
        if(sp->device_type == XFRAME_II_DEVICE) {
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
        }

        /* Driver-maintained software statistics (host byte order, no
         * le*_to_cpu conversion needed).  First entry is a placeholder. */
        tmp_stats[i++] = 0;
        tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
        tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
        for (k = 0; k < MAX_RX_RINGS; k++)
                tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
        tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
        tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
        tmp_stats[i++] = stat_info->sw_stat.sending_both;
        tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
        tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
        if (stat_info->sw_stat.num_aggregations) {
                u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
                int count = 0;
                /*
                 * Since 64-bit divide does not work on all platforms,
                 * do repeated subtraction.
                 */
                while (tmp >= stat_info->sw_stat.num_aggregations) {
                        tmp -= stat_info->sw_stat.num_aggregations;
                        count++;
                }
                tmp_stats[i++] = count;
        }
        else
                tmp_stats[i++] = 0;
        tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
        tmp_stats[i++] = stat_info->sw_stat.mem_freed;
        tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
        tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
        tmp_stats[i++] = stat_info->sw_stat.link_up_time;
        tmp_stats[i++] = stat_info->sw_stat.link_down_time;

        /* Transmit-side error counters. */
        tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

        /* Receive-side and per-block error counters. */
        tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6211
6212 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6213 {
6214         return (XENA_REG_SPACE);
6215 }
6216
6217
6218 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6219 {
6220         struct s2io_nic *sp = dev->priv;
6221
6222         return (sp->rx_csum);
6223 }
6224
6225 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6226 {
6227         struct s2io_nic *sp = dev->priv;
6228
6229         if (data)
6230                 sp->rx_csum = 1;
6231         else
6232                 sp->rx_csum = 0;
6233
6234         return 0;
6235 }
6236
6237 static int s2io_get_eeprom_len(struct net_device *dev)
6238 {
6239         return (XENA_EEPROM_SPACE);
6240 }
6241
6242 static int s2io_get_sset_count(struct net_device *dev, int sset)
6243 {
6244         struct s2io_nic *sp = dev->priv;
6245
6246         switch (sset) {
6247         case ETH_SS_TEST:
6248                 return S2IO_TEST_LEN;
6249         case ETH_SS_STATS:
6250                 switch(sp->device_type) {
6251                 case XFRAME_I_DEVICE:
6252                         return XFRAME_I_STAT_LEN;
6253                 case XFRAME_II_DEVICE:
6254                         return XFRAME_II_STAT_LEN;
6255                 default:
6256                         return 0;
6257                 }
6258         default:
6259                 return -EOPNOTSUPP;
6260         }
6261 }
6262
6263 static void s2io_ethtool_get_strings(struct net_device *dev,
6264                                      u32 stringset, u8 * data)
6265 {
6266         int stat_size = 0;
6267         struct s2io_nic *sp = dev->priv;
6268
6269         switch (stringset) {
6270         case ETH_SS_TEST:
6271                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6272                 break;
6273         case ETH_SS_STATS:
6274                 stat_size = sizeof(ethtool_xena_stats_keys);
6275                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6276                 if(sp->device_type == XFRAME_II_DEVICE) {
6277                         memcpy(data + stat_size,
6278                                 &ethtool_enhanced_stats_keys,
6279                                 sizeof(ethtool_enhanced_stats_keys));
6280                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6281                 }
6282
6283                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6284                         sizeof(ethtool_driver_stats_keys));
6285         }
6286 }
6287
6288 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6289 {
6290         if (data)
6291                 dev->features |= NETIF_F_IP_CSUM;
6292         else
6293                 dev->features &= ~NETIF_F_IP_CSUM;
6294
6295         return 0;
6296 }
6297
6298 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6299 {
6300         return (dev->features & NETIF_F_TSO) != 0;
6301 }
6302 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6303 {
6304         if (data)
6305                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6306         else
6307                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6308
6309         return 0;
6310 }
6311
/* ethtool entry points for the s2io driver; hooked up to the net_device
 * at probe time.  Unset operations fall back to the ethtool core. */
static const struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_ringparam = s2io_ethtool_gringparam,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .get_tso = s2io_ethtool_op_get_tso,
        .set_tso = s2io_ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_ethtool_stats = s2io_get_ethtool_stats,
        .get_sset_count = s2io_get_sset_count,
};
6338
6339 /**
6340  *  s2io_ioctl - Entry point for the Ioctl
6341  *  @dev :  Device pointer.
 *  @ifr :  An IOCTL specific structure, that can contain a pointer to
6343  *  a proprietary structure used to pass information to the driver.
6344  *  @cmd :  This is used to distinguish between the different commands that
6345  *  can be passed to the IOCTL functions.
6346  *  Description:
 *  Currently no special functionality is supported in IOCTL, hence the
 *  function always returns -EOPNOTSUPP.
6349  */
6350
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        /* No private ioctls are implemented by this driver. */
        return -EOPNOTSUPP;
}
6355
6356 /**
6357  *  s2io_change_mtu - entry point to change MTU size for the device.
6358  *   @dev : device pointer.
6359  *   @new_mtu : the new MTU size for the device.
6360  *   Description: A driver entry point to change MTU size for the device.
6361  *   Before changing the MTU the device must be stopped.
6362  *  Return value:
6363  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6364  *   file on failure.
6365  */
6366
6367 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6368 {
6369         struct s2io_nic *sp = dev->priv;
6370         int ret = 0;
6371
6372         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6373                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6374                           dev->name);
6375                 return -EPERM;
6376         }
6377
6378         dev->mtu = new_mtu;
6379         if (netif_running(dev)) {
6380                 s2io_card_down(sp);
6381                 netif_stop_queue(dev);
6382                 ret = s2io_card_up(sp);
6383                 if (ret) {
6384                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6385                                   __FUNCTION__);
6386                         return ret;
6387                 }
6388                 if (netif_queue_stopped(dev))
6389                         netif_wake_queue(dev);
6390         } else { /* Device is down */
6391                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6392                 u64 val64 = new_mtu;
6393
6394                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6395         }
6396
6397         return ret;
6398 }
6399
6400 /**
6401  *  s2io_tasklet - Bottom half of the ISR.
 *  @dev_addr : address of the net_device structure, passed as an unsigned long.
6403  *  Description:
6404  *  This is the tasklet or the bottom half of the ISR. This is
6405  *  an extension of the ISR which is scheduled by the scheduler to be run
6406  *  when the load on the CPU is low. All low priority tasks of the ISR can
6407  *  be pushed into the tasklet. For now the tasklet is used only to
6408  *  replenish the Rx buffers in the Rx buffer descriptors.
6409  *  Return value:
6410  *  void.
6411  */
6412
static void s2io_tasklet(unsigned long dev_addr)
{
        /* dev_addr carries the net_device pointer cast to unsigned long
         * (the standard tasklet data convention). */
        struct net_device *dev = (struct net_device *) dev_addr;
        struct s2io_nic *sp = dev->priv;
        int i, ret;
        struct mac_info *mac_control;
        struct config_param *config;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /* TASKLET_IN_USE appears to test-and-set bit 0 of
         * sp->tasklet_status so only one replenish pass runs at a time --
         * TODO confirm against the macro definition. */
        if (!TASKLET_IN_USE) {
                /* Top up the Rx descriptors of every configured ring. */
                for (i = 0; i < config->rx_ring_num; i++) {
                        ret = fill_rx_buffers(sp, i);
                        if (ret == -ENOMEM) {
                                /* Allocation failed: give up for this run;
                                 * a later invocation will retry. */
                                DBG_PRINT(INFO_DBG, "%s: Out of ",
                                          dev->name);
                                DBG_PRINT(INFO_DBG, "memory in tasklet\n");
                                break;
                        } else if (ret == -EFILL) {
                                /* Ring already full: nothing more to do. */
                                DBG_PRINT(INFO_DBG,
                                          "%s: Rx Ring %d is full\n",
                                          dev->name, i);
                                break;
                        }
                }
                /* Release the busy bit taken by TASKLET_IN_USE above. */
                clear_bit(0, (&sp->tasklet_status));
        }
}
6442
6443 /**
 * s2io_set_link - Set the Link status
 * @work: work_struct embedded in the device private structure
6446  * Description: Sets the link status for the adapter
6447  */
6448
/* Work-queue handler: read the adapter status and propagate link
 * up/down to the stack, enabling the adapter and its LED/GPIO link
 * indicators as needed.  Runs under rtnl_lock and is serialised against
 * card reset via the __S2IO_STATE_LINK_TASK bit. */
static void s2io_set_link(struct work_struct *work)
{
        struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
        struct net_device *dev = nic->dev;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64;
        u16 subid;

        rtnl_lock();

        /* Interface went down before the work item ran; nothing to do. */
        if (!netif_running(dev))
                goto out_unlock;

        if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
                /* The card is being reset, no point doing anything */
                goto out_unlock;
        }

        subid = nic->pdev->subsystem_device;
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /*
                 * Allow a small delay for the NICs self initiated
                 * cleanup to complete.
                 */
                msleep(100);
        }

        val64 = readq(&bar0->adapter_status);
        if (LINK_IS_UP(val64)) {
                /* Enable the adapter on first link-up, but only once the
                 * hardware reports a quiescent state. */
                if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
                        if (verify_xena_quiescence(nic)) {
                                val64 = readq(&bar0->adapter_control);
                                val64 |= ADAPTER_CNTL_EN;
                                writeq(val64, &bar0->adapter_control);
                                /* Some boards drive the link LED from a
                                 * GPIO line instead of the LED bit. */
                                if (CARDS_WITH_FAULTY_LINK_INDICATORS(
                                        nic->device_type, subid)) {
                                        val64 = readq(&bar0->gpio_control);
                                        val64 |= GPIO_CTRL_GPIO_0;
                                        writeq(val64, &bar0->gpio_control);
                                        /* Read back to flush the write --
                                         * presumably a PCI posting flush;
                                         * confirm against hardware docs. */
                                        val64 = readq(&bar0->gpio_control);
                                } else {
                                        val64 |= ADAPTER_LED_ON;
                                        writeq(val64, &bar0->adapter_control);
                                }
                                nic->device_enabled_once = TRUE;
                        } else {
                                DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
                                DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
                                netif_stop_queue(dev);
                        }
                }
                val64 = readq(&bar0->adapter_control);
                val64 |= ADAPTER_LED_ON;
                writeq(val64, &bar0->adapter_control);
                s2io_link(nic, LINK_UP);
        } else {
                if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                                      subid)) {
                        val64 = readq(&bar0->gpio_control);
                        val64 &= ~GPIO_CTRL_GPIO_0;
                        writeq(val64, &bar0->gpio_control);
                        val64 = readq(&bar0->gpio_control);
                }
                /* turn off LED */
                val64 = readq(&bar0->adapter_control);
                val64 = val64 &(~ADAPTER_LED_ON);
                writeq(val64, &bar0->adapter_control);
                s2io_link(nic, LINK_DOWN);
        }
        /* Allow reset / subsequent link tasks to proceed. */
        clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
        rtnl_unlock();
}
6523
/*
 * set_rxd_buffer_pointer - re-attach DMA-mapped buffers to a receive
 * descriptor while the adapter is being quiesced.
 * @sp: device private structure.
 * @rxdp: descriptor to refill (interpreted as RxD1 or RxD3 per rxd_mode).
 * @ba: buffer-address bookkeeping for 3-buffer mode (may be stale/NULL in
 *      1-buffer mode; only dereferenced under RXD_MODE_3B).
 * @skb: in/out skb cache.  Once an skb has been allocated and mapped, the
 *       same skb/DMA addresses (via @temp0..@temp2) are reused for every
 *       subsequent descriptor whose Host_Control is NULL, because these
 *       frames will never be processed - only the ownership handoff to the
 *       hardware matters here.
 * @temp0, @temp1, @temp2: in/out caches of the mapped buffer addresses.
 * @size: skb allocation size computed by the caller from MTU and ring mode.
 *
 * Returns 0 on success, -ENOMEM on skb allocation or PCI mapping failure
 * (note the NEGATIVE errno - callers must compare against -ENOMEM).
 * On a mapping failure any mappings already made for this descriptor are
 * undone before jumping to memalloc_failed, which frees the skb.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                                struct buffAdd *ba,
                                struct sk_buff **skb, u64 *temp0, u64 *temp1,
                                u64 *temp2, int size)
{
        struct net_device *dev = sp->dev;
        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

        if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
                struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
                /* allocate skb */
                if (*skb) {
                        DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
                        /*
                         * As Rx frame are not going to be processed,
                         * using same mapped address for the Rxd
                         * buffer pointer
                         */
                        rxdp1->Buffer0_ptr = *temp0;
                } else {
                        *skb = dev_alloc_skb(size);
                        if (!(*skb)) {
                                DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
                                DBG_PRINT(INFO_DBG, "memory to allocate ");
                                DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
                                sp->mac_control.stats_info->sw_stat. \
                                        mem_alloc_fail_cnt++;
                                return -ENOMEM ;
                        }
                        sp->mac_control.stats_info->sw_stat.mem_allocated
                                += (*skb)->truesize;
                        /* storing the mapped addr in a temp variable
                         * such it will be used for next rxd whose
                         * Host Control is NULL
                         */
                        rxdp1->Buffer0_ptr = *temp0 =
                                pci_map_single( sp->pdev, (*skb)->data,
                                        size - NET_IP_ALIGN,
                                        PCI_DMA_FROMDEVICE);
                        /* 0 / DMA_ERROR_CODE signal a failed mapping here */
                        if( (rxdp1->Buffer0_ptr == 0) ||
                                (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
                                goto memalloc_failed;
                        }
                        rxdp->Host_Control = (unsigned long) (*skb);
                }
        } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
                struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
                /* Two buffer Mode */
                if (*skb) {
                        /* reuse the cached mappings for all three buffers */
                        rxdp3->Buffer2_ptr = *temp2;
                        rxdp3->Buffer0_ptr = *temp0;
                        rxdp3->Buffer1_ptr = *temp1;
                } else {
                        *skb = dev_alloc_skb(size);
                        if (!(*skb)) {
                                DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
                                DBG_PRINT(INFO_DBG, "memory to allocate ");
                                DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
                                sp->mac_control.stats_info->sw_stat. \
                                        mem_alloc_fail_cnt++;
                                return -ENOMEM;
                        }
                        sp->mac_control.stats_info->sw_stat.mem_allocated
                                += (*skb)->truesize;
                        /* Buffer-2 carries the payload (MTU + 4) */
                        rxdp3->Buffer2_ptr = *temp2 =
                                pci_map_single(sp->pdev, (*skb)->data,
                                               dev->mtu + 4,
                                               PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer2_ptr == 0) ||
                                (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
                                goto memalloc_failed;
                        }
                        /* Buffer-0 holds the (split) header area */
                        rxdp3->Buffer0_ptr = *temp0 =
                                pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
                                                PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer0_ptr == 0) ||
                                (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
                                /* unwind the Buffer-2 mapping before bailing */
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer2_ptr,
                                        dev->mtu + 4, PCI_DMA_FROMDEVICE);
                                goto memalloc_failed;
                        }
                        rxdp->Host_Control = (unsigned long) (*skb);

                        /* Buffer-1 will be dummy buffer not used */
                        rxdp3->Buffer1_ptr = *temp1 =
                                pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer1_ptr == 0) ||
                                (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
                                /* unwind both earlier mappings */
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer0_ptr,
                                        BUF0_LEN, PCI_DMA_FROMDEVICE);
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer2_ptr,
                                        dev->mtu + 4, PCI_DMA_FROMDEVICE);
                                goto memalloc_failed;
                        }
                }
        }
        return 0;
        memalloc_failed:
                /* common exit for mapping failures: account and drop the skb */
                stats->pci_map_fail_cnt++;
                stats->mem_freed += (*skb)->truesize;
                dev_kfree_skb(*skb);
                return -ENOMEM;
}
6631
6632 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6633                                 int size)
6634 {
6635         struct net_device *dev = sp->dev;
6636         if (sp->rxd_mode == RXD_MODE_1) {
6637                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6638         } else if (sp->rxd_mode == RXD_MODE_3B) {
6639                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6640                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6641                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6642         }
6643 }
6644
6645 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6646 {
6647         int i, j, k, blk_cnt = 0, size;
6648         struct mac_info * mac_control = &sp->mac_control;
6649         struct config_param *config = &sp->config;
6650         struct net_device *dev = sp->dev;
6651         struct RxD_t *rxdp = NULL;
6652         struct sk_buff *skb = NULL;
6653         struct buffAdd *ba = NULL;
6654         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6655
6656         /* Calculate the size based on ring mode */
6657         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6658                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6659         if (sp->rxd_mode == RXD_MODE_1)
6660                 size += NET_IP_ALIGN;
6661         else if (sp->rxd_mode == RXD_MODE_3B)
6662                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6663
6664         for (i = 0; i < config->rx_ring_num; i++) {
6665                 blk_cnt = config->rx_cfg[i].num_rxd /
6666                         (rxd_count[sp->rxd_mode] +1);
6667
6668                 for (j = 0; j < blk_cnt; j++) {
6669                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6670                                 rxdp = mac_control->rings[i].
6671                                         rx_blocks[j].rxds[k].virt_addr;
6672                                 if(sp->rxd_mode == RXD_MODE_3B)
6673                                         ba = &mac_control->rings[i].ba[j][k];
6674                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6675                                                        &skb,(u64 *)&temp0_64,
6676                                                        (u64 *)&temp1_64,
6677                                                        (u64 *)&temp2_64,
6678                                                         size) == ENOMEM) {
6679                                         return 0;
6680                                 }
6681
6682                                 set_rxd_buffer_size(sp, rxdp, size);
6683                                 wmb();
6684                                 /* flip the Ownership bit to Hardware */
6685                                 rxdp->Control_1 |= RXD_OWN_XENA;
6686                         }
6687                 }
6688         }
6689         return 0;
6690
6691 }
6692
/*
 * s2io_add_isr - register the device's interrupt handler(s).
 * @sp: device private structure.
 *
 * Tries MSI-X first (when configured): one vector per Tx FIFO / Rx ring as
 * described by sp->s2io_entries[].  If MSI-X enabling or any request_irq()
 * fails, the driver falls back to a single shared INTA interrupt.
 *
 * Returns 0 on success, -1 if even the INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
        int ret = 0;
        struct net_device *dev = sp->dev;
        int err = 0;

        if (sp->config.intr_type == MSI_X)
                ret = s2io_enable_msi_x(sp);
        if (ret) {
                /* MSI-X could not be enabled; drop back to legacy INTA */
                DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
                sp->config.intr_type = INTA;
        }

        /* Store the values of the MSIX table in the struct s2io_nic structure */
        store_xmsi_data(sp);

        /* After proper initialization of H/W, register ISR */
        if (sp->config.intr_type == MSI_X) {
                int i, msix_tx_cnt=0,msix_rx_cnt=0;

                /* entry 0 is the alarm vector; data vectors start at 1 and
                 * the list is terminated by in_use != MSIX_FLG */
                for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
                        if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
                                sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
                                        dev->name, i);
                                err = request_irq(sp->entries[i].vector,
                                          s2io_msix_fifo_handle, 0, sp->desc[i],
                                                  sp->s2io_entries[i].arg);
                                /* If either data or addr is zero print it */
                                if(!(sp->msix_info[i].addr &&
                                        sp->msix_info[i].data)) {
                                        DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
                                                "Data:0x%lx\n",sp->desc[i],
                                                (unsigned long long)
                                                sp->msix_info[i].addr,
                                                (unsigned long)
                                                ntohl(sp->msix_info[i].data));
                                } else {
                                        msix_tx_cnt++;
                                }
                        } else {
                                sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
                                        dev->name, i);
                                err = request_irq(sp->entries[i].vector,
                                          s2io_msix_ring_handle, 0, sp->desc[i],
                                                  sp->s2io_entries[i].arg);
                                /* If either data or addr is zero print it */
                                if(!(sp->msix_info[i].addr &&
                                        sp->msix_info[i].data)) {
                                        DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
                                                "Data:0x%lx\n",sp->desc[i],
                                                (unsigned long long)
                                                sp->msix_info[i].addr,
                                                (unsigned long)
                                                ntohl(sp->msix_info[i].data));
                                } else {
                                        msix_rx_cnt++;
                                }
                        }
                        if (err) {
                                /* undo every vector registered so far and
                                 * retry below with legacy INTA */
                                remove_msix_isr(sp);
                                DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
                                          "failed\n", dev->name, i);
                                DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
                                                 dev->name);
                                sp->config.intr_type = INTA;
                                break;
                        }
                        sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
                }
                if (!err) {
                        printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
                                msix_tx_cnt);
                        printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
                                msix_rx_cnt);
                }
        }
        if (sp->config.intr_type == INTA) {
                err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
                                sp->name, dev);
                if (err) {
                        DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
                                  dev->name);
                        return -1;
                }
        }
        return 0;
}
6780 static void s2io_rem_isr(struct s2io_nic * sp)
6781 {
6782         if (sp->config.intr_type == MSI_X)
6783                 remove_msix_isr(sp);
6784         else
6785                 remove_inta_isr(sp);
6786 }
6787
/*
 * do_s2io_card_down - tear the adapter down.
 * @sp: device private structure.
 * @do_io: non-zero for a full shutdown that touches the hardware (stop the
 *         NIC, wait for quiescence, reset); zero to skip register I/O (e.g.
 *         when the device is already gone).
 *
 * Ordering matters here: the alarm timer and link task are stopped first,
 * then interrupts/tasklets, and only then are the Tx/Rx buffers freed under
 * their respective locks.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
        int cnt = 0;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        unsigned long flags;
        register u64 val64 = 0;
        struct config_param *config;
        config = &sp->config;

        if (!is_s2io_card_up(sp))
                return;

        del_timer_sync(&sp->alarm_timer);
        /* If s2io_set_link task is executing, wait till it completes. */
        while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
                msleep(50);
        }
        clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

        /* Disable napi */
        if (config->napi)
                napi_disable(&sp->napi);

        /* disable Tx and Rx traffic on the NIC */
        if (do_io)
                stop_nic(sp);

        s2io_rem_isr(sp);

        /* Kill tasklet. */
        tasklet_kill(&sp->task);

        /* Check if the device is Quiescent and then Reset the NIC */
        while(do_io) {
                /* As per the HW requirement we need to replenish the
                 * receive buffer to avoid the ring bump. Since there is
                 * no intention of processing the Rx frame at this point we
                 * are just setting the ownership bit of rxd in each Rx
                 * ring to HW and set the appropriate buffer size
                 * based on the ring mode
                 */
                rxd_owner_bit_reset(sp);

                val64 = readq(&bar0->adapter_status);
                if (verify_xena_quiescence(sp)) {
                        if(verify_pcc_quiescent(sp, sp->device_enabled_once))
                        break;
                }

                /* poll every 50ms, give up after 10 tries (~500ms) */
                msleep(50);
                cnt++;
                if (cnt == 10) {
                        DBG_PRINT(ERR_DBG,
                                  "s2io_close:Device not Quiescent ");
                        DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
                                  (unsigned long long) val64);
                        break;
                }
        }
        if (do_io)
                s2io_reset(sp);

        spin_lock_irqsave(&sp->tx_lock, flags);
        /* Free all Tx buffers */
        free_tx_buffers(sp);
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        /* Free all Rx buffers */
        spin_lock_irqsave(&sp->rx_lock, flags);
        free_rx_buffers(sp);
        spin_unlock_irqrestore(&sp->rx_lock, flags);

        /* allow a pending s2io_set_link task to run again */
        clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
6862
/*
 * s2io_card_down - full shutdown wrapper: tear the card down including all
 * hardware I/O (do_io = 1).
 * @sp: device private structure.
 */
static void s2io_card_down(struct s2io_nic * sp)
{
        do_s2io_card_down(sp, 1);
}
6867
/*
 * s2io_card_up - bring the adapter fully up.
 * @sp: device private structure.
 *
 * Initializes the hardware registers, fills every Rx ring with buffers,
 * enables NAPI, restores the receive mode, starts the NIC, registers the
 * ISR, arms the alarm timer/tasklet and finally enables interrupts.
 * Any failure unwinds what was done so far (reset + free Rx buffers).
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
        int i, ret = 0;
        struct mac_info *mac_control;
        struct config_param *config;
        struct net_device *dev = (struct net_device *) sp->dev;
        u16 interruptible;

        /* Initialize the H/W I/O registers */
        ret = init_nic(sp);
        if (ret != 0) {
                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                          dev->name);
                /* -EIO means the hardware is inaccessible; skip the reset */
                if (ret != -EIO)
                        s2io_reset(sp);
                return ret;
        }

        /*
         * Initializing the Rx buffers. For now we are considering only 1
         * Rx ring and initializing buffers into 30 Rx blocks
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        for (i = 0; i < config->rx_ring_num; i++) {
                if ((ret = fill_rx_buffers(sp, i))) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
                        s2io_reset(sp);
                        free_rx_buffers(sp);
                        return -ENOMEM;
                }
                DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
                          atomic_read(&sp->rx_bufs_left[i]));
        }

        /* Initialise napi */
        if (config->napi)
                napi_enable(&sp->napi);

        /* Maintain the state prior to the open */
        if (sp->promisc_flg)
                sp->promisc_flg = 0;
        if (sp->m_cast_flg) {
                sp->m_cast_flg = 0;
                sp->all_multi_pos= 0;
        }

        /* Setting its receive mode */
        s2io_set_multicast(dev);

        if (sp->lro) {
                /* Initialize max aggregatable pkts per session based on MTU */
                sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
                /* Check if we can use(if specified) user provided value */
                if (lro_max_pkts < sp->lro_max_aggr_per_sess)
                        sp->lro_max_aggr_per_sess = lro_max_pkts;
        }

        /* Enable Rx Traffic and interrupts on the NIC */
        if (start_nic(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
                s2io_reset(sp);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* Add interrupt service routine */
        if (s2io_add_isr(sp) != 0) {
                /* s2io_add_isr() may have partially registered MSI-X vectors */
                if (sp->config.intr_type == MSI_X)
                        s2io_rem_isr(sp);
                s2io_reset(sp);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* fire the alarm handler every half second */
        S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

        /* Enable tasklet for the device */
        tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

        /*  Enable select interrupts */
        en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
        if (sp->config.intr_type != INTA)
                en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
        else {
                interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
                interruptible |= TX_PIC_INTR;
                en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
        }

        set_bit(__S2IO_STATE_CARD_UP, &sp->state);
        return 0;
}
6963
6964 /**
6965  * s2io_restart_nic - Resets the NIC.
6966  * @data : long pointer to the device private structure
6967  * Description:
6968  * This function is scheduled to be run by the s2io_tx_watchdog
6969  * function after 0.5 secs to reset the NIC. The idea is to reduce
6970  * the run time of the watch dog routine which is run holding a
6971  * spin lock.
6972  */
6973
6974 static void s2io_restart_nic(struct work_struct *work)
6975 {
6976         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6977         struct net_device *dev = sp->dev;
6978
6979         rtnl_lock();
6980
6981         if (!netif_running(dev))
6982                 goto out_unlock;
6983
6984         s2io_card_down(sp);
6985         if (s2io_card_up(sp)) {
6986                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6987                           dev->name);
6988         }
6989         netif_wake_queue(dev);
6990         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6991                   dev->name);
6992 out_unlock:
6993         rtnl_unlock();
6994 }
6995
6996 /**
6997  *  s2io_tx_watchdog - Watchdog for transmit side.
6998  *  @dev : Pointer to net device structure
6999  *  Description:
7000  *  This function is triggered if the Tx Queue is stopped
7001  *  for a pre-defined amount of time when the Interface is still up.
7002  *  If the Interface is jammed in such a situation, the hardware is
7003  *  reset (by s2io_close) and restarted again (by s2io_open) to
7004  *  overcome any problem that might have been caused in the hardware.
7005  *  Return value:
7006  *  void
7007  */
7008
7009 static void s2io_tx_watchdog(struct net_device *dev)
7010 {
7011         struct s2io_nic *sp = dev->priv;
7012
7013         if (netif_carrier_ok(dev)) {
7014                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7015                 schedule_work(&sp->rst_timer_task);
7016                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7017         }
7018 }
7019
7020 /**
7021  *   rx_osm_handler - To perform some OS related operations on SKB.
7022  *   @ring_data : per-ring private data (contains the owning s2io_nic and
7023  *   the ring number).
7024  *   @rxdp : the Rx descriptor from which the SKB and transfer code are
7025  *   extracted.
7027  *   Description:
7028  *   This function is called by the Rx interrupt service routine to perform
7029  *   some OS related operations on the SKB before passing it to the upper
7030  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7031  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7032  *   to the upper layer. If the checksum is wrong, it increments the Rx
7033  *   packet error count, frees the SKB and returns error.
7034  *   Return value:
7035  *   SUCCESS on success and -1 on failure.
7036  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
        struct s2io_nic *sp = ring_data->nic;
        struct net_device *dev = (struct net_device *) sp->dev;
        /* the skb pointer was stashed in Host_Control when the RxD was filled */
        struct sk_buff *skb = (struct sk_buff *)
                ((unsigned long) rxdp->Host_Control);
        int ring_no = ring_data->ring_no;
        u16 l3_csum, l4_csum;
        /* hardware transfer code: non-zero indicates a receive error */
        unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
        struct lro *lro;
        u8 err_mask;

        skb->dev = dev;

        if (err) {
                /* Check for parity error */
                if (err & 0x1) {
                        sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
                }
                /* error class lives in bits 48+ of the transfer code */
                err_mask = err >> 48;
                /* bump the per-class error counter */
                switch(err_mask) {
                        case 1:
                                sp->mac_control.stats_info->sw_stat.
                                rx_parity_err_cnt++;
                        break;

                        case 2:
                                sp->mac_control.stats_info->sw_stat.
                                rx_abort_cnt++;
                        break;

                        case 3:
                                sp->mac_control.stats_info->sw_stat.
                                rx_parity_abort_cnt++;
                        break;

                        case 4:
                                sp->mac_control.stats_info->sw_stat.
                                rx_rda_fail_cnt++;
                        break;

                        case 5:
                                sp->mac_control.stats_info->sw_stat.
                                rx_unkn_prot_cnt++;
                        break;

                        case 6:
                                sp->mac_control.stats_info->sw_stat.
                                rx_fcs_err_cnt++;
                        break;

                        case 7:
                                sp->mac_control.stats_info->sw_stat.
                                rx_buf_size_err_cnt++;
                        break;

                        case 8:
                                sp->mac_control.stats_info->sw_stat.
                                rx_rxd_corrupt_cnt++;
                        break;

                        case 15:
                                sp->mac_control.stats_info->sw_stat.
                                rx_unkn_err_cnt++;
                        break;
                }
                /*
                * Drop the packet if bad transfer code. Exception being
                * 0x5, which could be due to unsupported IPv6 extension header.
                * In this case, we let stack handle the packet.
                * Note that in this case, since checksum will be incorrect,
                * stack will validate the same.
                */
                if (err_mask != 0x5) {
                        DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
                                dev->name, err_mask);
                        sp->stats.rx_crc_errors++;
                        sp->mac_control.stats_info->sw_stat.mem_freed
                                += skb->truesize;
                        dev_kfree_skb(skb);
                        atomic_dec(&sp->rx_bufs_left[ring_no]);
                        rxdp->Host_Control = 0;
                        return 0;
                }
        }

        /* Updating statistics */
        sp->stats.rx_packets++;
        rxdp->Host_Control = 0;
        if (sp->rxd_mode == RXD_MODE_1) {
                /* single-buffer mode: whole frame is in the skb already */
                int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

                sp->stats.rx_bytes += len;
                skb_put(skb, len);

        } else if (sp->rxd_mode == RXD_MODE_3B) {
                /* 3-buffer mode: copy the header (buffer 0) in front of the
                 * payload (buffer 2) to rebuild a contiguous frame */
                int get_block = ring_data->rx_curr_get_info.block_index;
                int get_off = ring_data->rx_curr_get_info.offset;
                int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
                int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
                unsigned char *buff = skb_push(skb, buf0_len);

                struct buffAdd *ba = &ring_data->ba[get_block][get_off];
                sp->stats.rx_bytes += buf0_len + buf2_len;
                memcpy(buff, ba->ba_0, buf0_len);
                skb_put(skb, buf2_len);
        }

        /* hardware checksum is only trusted for non-fragmented TCP/UDP
         * frames (always when LRO is off) and when rx_csum is enabled */
        if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
            (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
            (sp->rx_csum)) {
                l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
                l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
                if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
                        /*
                         * NIC verifies if the Checksum of the received
                         * frame is Ok or not and accordingly returns
                         * a flag in the RxD.
                         */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        if (sp->lro) {
                                u32 tcp_len;
                                u8 *tcp;
                                int ret = 0;

                                /* decide how this segment joins an LRO
                                 * aggregation session */
                                ret = s2io_club_tcp_session(skb->data, &tcp,
                                                            &tcp_len, &lro,
                                                            rxdp, sp);
                                switch (ret) {
                                        case 3: /* Begin anew */
                                                lro->parent = skb;
                                                goto aggregate;
                                        case 1: /* Aggregate */
                                        {
                                                lro_append_pkt(sp, lro,
                                                        skb, tcp_len);
                                                goto aggregate;
                                        }
                                        case 4: /* Flush session */
                                        {
                                                lro_append_pkt(sp, lro,
                                                        skb, tcp_len);
                                                queue_rx_frame(lro->parent);
                                                clear_lro_session(lro);
                                                sp->mac_control.stats_info->
                                                    sw_stat.flush_max_pkts++;
                                                goto aggregate;
                                        }
                                        case 2: /* Flush both */
                                                lro->parent->data_len =
                                                        lro->frags_len;
                                                sp->mac_control.stats_info->
                                                     sw_stat.sending_both++;
                                                queue_rx_frame(lro->parent);
                                                clear_lro_session(lro);
                                                /* send the current skb up
                                                 * too, via the label below */
                                                goto send_up;
                                        case 0: /* sessions exceeded */
                                        case -1: /* non-TCP or not
                                                  * L2 aggregatable
                                                  */
                                        case 5: /*
                                                 * First pkt in session not
                                                 * L3/L4 aggregatable
                                                 */
                                                break;
                                        default:
                                                DBG_PRINT(ERR_DBG,
                                                        "%s: Samadhana!!\n",
                                                         __FUNCTION__);
                                                BUG();
                                }
                        }
                } else {
                        /*
                         * Packet with erroneous checksum, let the
                         * upper layers deal with it.
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                }
        } else {
                skb->ip_summed = CHECKSUM_NONE;
        }
        sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
        if (!sp->lro) {
                skb->protocol = eth_type_trans(skb, dev);
                if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
                        vlan_strip_flag)) {
                        /* Queueing the vlan frame to the upper layer */
                        if (napi)
                                vlan_hwaccel_receive_skb(skb, sp->vlgrp,
                                        RXD_GET_VLAN_TAG(rxdp->Control_2));
                        else
                                vlan_hwaccel_rx(skb, sp->vlgrp,
                                        RXD_GET_VLAN_TAG(rxdp->Control_2));
                } else {
                        if (napi)
                                netif_receive_skb(skb);
                        else
                                netif_rx(skb);
                }
        } else {
send_up:
                queue_rx_frame(skb);
        }
        dev->last_rx = jiffies;
aggregate:
        /* one descriptor consumed, whether sent up or held by LRO */
        atomic_dec(&sp->rx_bufs_left[ring_no]);
        return SUCCESS;
}
7246
7247 /**
7248  *  s2io_link - stops/starts the Tx queue.
7249  *  @sp : private member of the device structure, which is a pointer to the
7250  *  s2io_nic structure.
7251  *  @link : inidicates whether link is UP/DOWN.
7252  *  Description:
7253  *  This function stops/starts the Tx queue depending on whether the link
7254  *  status of the NIC is is down or up. This is called by the Alarm
7255  *  interrupt handler whenever a link change interrupt comes up.
7256  *  Return value:
7257  *  void.
7258  */
7259
7260 static void s2io_link(struct s2io_nic * sp, int link)
7261 {
7262         struct net_device *dev = (struct net_device *) sp->dev;
7263
7264         if (link != sp->last_link_state) {
7265                 if (link == LINK_DOWN) {
7266                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7267                         netif_carrier_off(dev);
7268                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7269                         sp->mac_control.stats_info->sw_stat.link_up_time =
7270                                 jiffies - sp->start_time;
7271                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7272                 } else {
7273                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7274                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7275                         sp->mac_control.stats_info->sw_stat.link_down_time =
7276                                 jiffies - sp->start_time;
7277                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7278                         netif_carrier_on(dev);
7279                 }
7280         }
7281         sp->last_link_state = link;
7282         sp->start_time = jiffies;
7283 }
7284
7285 /**
7286  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7287  *  @sp : private member of the device structure, which is a pointer to the
7288  *  s2io_nic structure.
7289  *  Description:
7290  *  This function initializes a few of the PCI and PCI-X configuration registers
7291  *  with recommended values.
7292  *  Return value:
7293  *  void
7294  */
7295
7296 static void s2io_init_pci(struct s2io_nic * sp)
7297 {
7298         u16 pci_cmd = 0, pcix_cmd = 0;
7299
7300         /* Enable Data Parity Error Recovery in PCI-X command register. */
7301         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7302                              &(pcix_cmd));
7303         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7304                               (pcix_cmd | 1));
7305         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7306                              &(pcix_cmd));
7307
7308         /* Set the PErr Response bit in PCI command register. */
7309         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7310         pci_write_config_word(sp->pdev, PCI_COMMAND,
7311                               (pci_cmd | PCI_COMMAND_PARITY));
7312         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7313 }
7314
7315 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7316 {
7317         if ( tx_fifo_num > 8) {
7318                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7319                          "supported\n");
7320                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7321                 tx_fifo_num = 8;
7322         }
7323         if ( rx_ring_num > 8) {
7324                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7325                          "supported\n");
7326                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7327                 rx_ring_num = 8;
7328         }
7329         if (*dev_intr_type != INTA)
7330                 napi = 0;
7331
7332         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7333                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7334                           "Defaulting to INTA\n");
7335                 *dev_intr_type = INTA;
7336         }
7337
7338         if ((*dev_intr_type == MSI_X) &&
7339                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7340                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7341                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7342                                         "Defaulting to INTA\n");
7343                 *dev_intr_type = INTA;
7344         }
7345
7346         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7347                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7348                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7349                 rx_ring_mode = 1;
7350         }
7351         return SUCCESS;
7352 }
7353
7354 /**
7355  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7356  * or Traffic class respectively.
7357  * @nic: device peivate variable
7358  * Description: The function configures the receive steering to
7359  * desired receive ring.
7360  * Return Value:  SUCCESS on success and
7361  * '-1' on failure (endian settings incorrect).
7362  */
7363 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7364 {
7365         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7366         register u64 val64 = 0;
7367
7368         if (ds_codepoint > 63)
7369                 return FAILURE;
7370
7371         val64 = RTS_DS_MEM_DATA(ring);
7372         writeq(val64, &bar0->rts_ds_mem_data);
7373
7374         val64 = RTS_DS_MEM_CTRL_WE |
7375                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7376                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7377
7378         writeq(val64, &bar0->rts_ds_mem_ctrl);
7379
7380         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7381                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7382                                 S2IO_BIT_RESET);
7383 }
7384
/**
 *  s2io_init_nic - Initialization of the adapter .
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
        struct s2io_nic *sp;
        struct net_device *dev;
        int i, j, ret;
        int dma_flag = FALSE;
        u32 mac_up, mac_down;
        u64 val64 = 0, tmp64 = 0;
        struct XENA_dev_config __iomem *bar0 = NULL;
        u16 subid;
        struct mac_info *mac_control;
        struct config_param *config;
        int mode;
        u8 dev_intr_type = intr_type;
        DECLARE_MAC_BUF(mac);

        /* Validate/clamp module parameters before touching hardware */
        if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
                return ret;

        if ((ret = pci_enable_device(pdev))) {
                DBG_PRINT(ERR_DBG,
                          "s2io_init_nic: pci_enable_device failed\n");
                return ret;
        }

        /* Prefer 64-bit DMA; fall back to 32-bit, else give up */
        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
                dma_flag = TRUE;
                if (pci_set_consistent_dma_mask
                    (pdev, DMA_64BIT_MASK)) {
                        DBG_PRINT(ERR_DBG,
                                  "Unable to obtain 64bit DMA for \
                                        consistent allocations\n");
                        pci_disable_device(pdev);
                        return -ENOMEM;
                }
        } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
                DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
        } else {
                pci_disable_device(pdev);
                return -ENOMEM;
        }
        if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
                DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
                pci_disable_device(pdev);
                return -ENODEV;
        }

        /* Allocate the net_device with our private area appended */
        dev = alloc_etherdev(sizeof(struct s2io_nic));
        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Device allocation failed\n");
                pci_disable_device(pdev);
                pci_release_regions(pdev);
                return -ENODEV;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /*  Private member variable initialized to s2io NIC structure */
        sp = dev->priv;
        memset(sp, 0, sizeof(struct s2io_nic));
        sp->dev = dev;
        sp->pdev = pdev;
        sp->high_dma_flag = dma_flag;
        sp->device_enabled_once = FALSE;
        if (rx_ring_mode == 1)
                sp->rxd_mode = RXD_MODE_1;
        if (rx_ring_mode == 2)
                sp->rxd_mode = RXD_MODE_3B;

        sp->config.intr_type = dev_intr_type;

        /* Herc (Xframe II) has its own device IDs; everything else is I */
        if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
                (pdev->device == PCI_DEVICE_ID_HERC_UNI))
                sp->device_type = XFRAME_II_DEVICE;
        else
                sp->device_type = XFRAME_I_DEVICE;

        sp->lro = lro_enable;

        /* Initialize some PCI/PCI-X fields of the NIC. */
        s2io_init_pci(sp);

        /*
         * Setting the device configuration parameters.
         * Most of these parameters can be specified by the user during
         * module insertion as they are module loadable parameters. If
         * these parameters are not specified during load time, they
         * are initialized with default values.
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        config->napi = napi;

        /* Tx side parameters. */
        config->tx_fifo_num = tx_fifo_num;
        for (i = 0; i < MAX_TX_FIFOS; i++) {
                config->tx_cfg[i].fifo_len = tx_fifo_len[i];
                config->tx_cfg[i].fifo_priority = i;
        }

        /* mapping the QoS priority to the configured fifos */
        for (i = 0; i < MAX_TX_FIFOS; i++)
                config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

        /* Small fifos interrupt per list instead of on utilization */
        config->tx_intr_type = TXD_INT_TYPE_UTILZ;
        for (i = 0; i < config->tx_fifo_num; i++) {
                config->tx_cfg[i].f_no_snoop =
                    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
                if (config->tx_cfg[i].fifo_len < 65) {
                        config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
                        break;
                }
        }
        /* + 2 because one Txd for skb->data and one Txd for UFO */
        config->max_txds = MAX_SKB_FRAGS + 2;

        /* Rx side parameters. */
        config->rx_ring_num = rx_ring_num;
        for (i = 0; i < MAX_RX_RINGS; i++) {
                config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
                    (rxd_count[sp->rxd_mode] + 1);
                config->rx_cfg[i].ring_priority = i;
        }

        for (i = 0; i < rx_ring_num; i++) {
                config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
                config->rx_cfg[i].f_no_snoop =
                    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
        }

        /*  Setting Mac Control parameters */
        mac_control->rmac_pause_time = rmac_pause_time;
        mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
        mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


        /* Initialize Ring buffer parameters. */
        for (i = 0; i < config->rx_ring_num; i++)
                atomic_set(&sp->rx_bufs_left[i], 0);

        /*  initialize the shared memory used by the NIC and the host */
        if (init_shared_mem(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
                          dev->name);
                ret = -ENOMEM;
                goto mem_alloc_failed;
        }

        /* BAR0: device registers; BAR2: Tx FIFO doorbells */
        sp->bar0 = ioremap(pci_resource_start(pdev, 0),
                                     pci_resource_len(pdev, 0));
        if (!sp->bar0) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar0_remap_failed;
        }

        sp->bar1 = ioremap(pci_resource_start(pdev, 2),
                                     pci_resource_len(pdev, 2));
        if (!sp->bar1) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar1_remap_failed;
        }

        dev->irq = pdev->irq;
        dev->base_addr = (unsigned long) sp->bar0;

        /* Initializing the BAR1 address as the start of the FIFO pointer. */
        for (j = 0; j < MAX_TX_FIFOS; j++) {
                mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
                    (sp->bar1 + (j * 0x00020000));
        }

        /*  Driver entry points */
        dev->open = &s2io_open;
        dev->stop = &s2io_close;
        dev->hard_start_xmit = &s2io_xmit;
        dev->get_stats = &s2io_get_stats;
        dev->set_multicast_list = &s2io_set_multicast;
        dev->do_ioctl = &s2io_ioctl;
        dev->set_mac_address = &s2io_set_mac_addr;
        dev->change_mtu = &s2io_change_mtu;
        SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        dev->vlan_rx_register = s2io_vlan_rx_register;

        /* Register the NAPI poll handler (weight 32) */
        netif_napi_add(dev, &sp->napi, s2io_poll, 32);

#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = s2io_netpoll;
#endif

        dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
        if (sp->high_dma_flag == TRUE)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= NETIF_F_TSO;
        dev->features |= NETIF_F_TSO6;
        /* UFO needs HW checksumming and is only offered on Xframe II */
        if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
                dev->features |= NETIF_F_UFO;
                dev->features |= NETIF_F_HW_CSUM;
        }

        dev->tx_timeout = &s2io_tx_watchdog;
        dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
        INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
        INIT_WORK(&sp->set_link_task, s2io_set_link);

        pci_save_state(sp->pdev);

        /* Setting swapper control on the NIC, for proper reset operation */
        if (s2io_set_swapper(sp)) {
                DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
                          dev->name);
                ret = -EAGAIN;
                goto set_swap_failed;
        }

        /* Verify if the Herc works on the slot its placed into */
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_verify_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
                        DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
                        ret = -EBADSLT;
                        goto set_swap_failed;
                }
        }

        /* Not needed for Herc */
        if (sp->device_type & XFRAME_I_DEVICE) {
                /*
                 * Fix for all "FFs" MAC address problems observed on
                 * Alpha platforms
                 */
                fix_mac_address(sp);
                s2io_reset(sp);
        }

        /*
         * MAC address initialization.
         * For now only one mac address will be read and used.
         */
        bar0 = sp->bar0;
        val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
            RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
        writeq(val64, &bar0->rmac_addr_cmd_mem);
        wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
        tmp64 = readq(&bar0->rmac_addr_data0_mem);
        mac_down = (u32) tmp64;
        mac_up = (u32) (tmp64 >> 32);

        /* Unpack the 6 MAC address bytes from the two 32-bit halves */
        sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
        sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
        sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
        sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
        sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
        sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

        /*  Set the factory defined MAC address initially   */
        dev->addr_len = ETH_ALEN;
        memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
        memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

        /* Store the values of the MSIX table in the s2io_nic structure */
        store_xmsi_data(sp);
        /* reset Nic and bring it to known state */
        s2io_reset(sp);

        /*
         * Initialize the tasklet status and link state flags
         * and the card state parameter
         */
        sp->tasklet_status = 0;
        sp->state = 0;

        /* Initialize spinlocks */
        spin_lock_init(&sp->tx_lock);

        if (!napi)
                spin_lock_init(&sp->put_lock);
        spin_lock_init(&sp->rx_lock);

        /*
         * SXE-002: Configure link and activity LED to init state
         * on driver load.
         */
        subid = sp->pdev->subsystem_device;
        if ((subid & 0xFF) >= 0x07) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *) bar0 + 0x2700);
                val64 = readq(&bar0->gpio_control);
        }

        sp->rx_csum = 1;        /* Rx chksum verify enabled by default */

        if (register_netdev(dev)) {
                DBG_PRINT(ERR_DBG, "Device registration failed\n");
                ret = -ENODEV;
                goto register_failed;
        }
        /* Announce the adapter: VPD, driver version, MAC, serial number */
        s2io_vpd_read(sp);
        DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
        DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
                  sp->product_name, pdev->revision);
        DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
                  s2io_driver_version);
        DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
                  dev->name, print_mac(mac, dev->dev_addr));
        DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_print_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
                        ret = -EBADSLT;
                        unregister_netdev(dev);
                        goto set_swap_failed;
                }
        }
        switch(sp->rxd_mode) {
                case RXD_MODE_1:
                    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
                                                dev->name);
                    break;
                case RXD_MODE_3B:
                    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
                                                dev->name);
                    break;
        }

        if (napi)
                DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
        switch(sp->config.intr_type) {
                case INTA:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
                    break;
                case MSI_X:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
                    break;
        }
        if (sp->lro)
                DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
                          dev->name);
        if (ufo)
                DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
                                        " enabled\n", dev->name);
        /* Initialize device name */
        sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

        /*
         * Make Link state as off at this point, when the Link change
         * interrupt comes the state will be automatically changed to
         * the right state.
         */
        netif_carrier_off(dev);

        return 0;

        /* Error unwinding: labels are ordered so each undoes the step
         * that succeeded before the failure. */
      register_failed:
      set_swap_failed:
        iounmap(sp->bar1);
      bar1_remap_failed:
        iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);

        return ret;
}
7786
7787 /**
7788  * s2io_rem_nic - Free the PCI device
7789  * @pdev: structure containing the PCI related information of the device.
7790  * Description: This function is called by the Pci subsystem to release a
7791  * PCI device and free up all resource held up by the device. This could
7792  * be in response to a Hot plug event or when the driver is to be removed
7793  * from memory.
7794  */
7795
7796 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7797 {
7798         struct net_device *dev =
7799             (struct net_device *) pci_get_drvdata(pdev);
7800         struct s2io_nic *sp;
7801
7802         if (dev == NULL) {
7803                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7804                 return;
7805         }
7806
7807         flush_scheduled_work();
7808
7809         sp = dev->priv;
7810         unregister_netdev(dev);
7811
7812         free_shared_mem(sp);
7813         iounmap(sp->bar0);
7814         iounmap(sp->bar1);
7815         pci_release_regions(pdev);
7816         pci_set_drvdata(pdev, NULL);
7817         free_netdev(dev);
7818         pci_disable_device(pdev);
7819 }
7820
/**
 * s2io_starter - Entry point for the driver
 * Description: Registers the PCI driver with the PCI core; per-adapter
 * setup (parameter verification, PCI configuration) happens in
 * s2io_init_nic() when a matching device is probed.
 */

static int __init s2io_starter(void)
{
        return pci_register_driver(&s2io_driver);
}
7831
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static __exit void s2io_closer(void)
{
        pci_unregister_driver(&s2io_driver);
        DBG_PRINT(INIT_DBG, "cleanup done\n");
}

/* Module load/unload hooks */
module_init(s2io_starter);
module_exit(s2io_closer);
7845
7846 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7847                 struct tcphdr **tcp, struct RxD_t *rxdp)
7848 {
7849         int ip_off;
7850         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7851
7852         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7853                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7854                           __FUNCTION__);
7855                 return -1;
7856         }
7857
7858         /* TODO:
7859          * By default the VLAN field in the MAC is stripped by the card, if this
7860          * feature is turned off in rx_pa_cfg register, then the ip_off field
7861          * has to be shifted by a further 2 bytes
7862          */
7863         switch (l2_type) {
7864                 case 0: /* DIX type */
7865                 case 4: /* DIX type with VLAN */
7866                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7867                         break;
7868                 /* LLC, SNAP etc are considered non-mergeable */
7869                 default:
7870                         return -1;
7871         }
7872
7873         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7874         ip_len = (u8)((*ip)->ihl);
7875         ip_len <<= 2;
7876         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7877
7878         return 0;
7879 }
7880
7881 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7882                                   struct tcphdr *tcp)
7883 {
7884         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7885         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7886            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7887                 return -1;
7888         return 0;
7889 }
7890
7891 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7892 {
7893         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7894 }
7895
7896 static void initiate_new_session(struct lro *lro, u8 *l2h,
7897                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7898 {
7899         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7900         lro->l2h = l2h;
7901         lro->iph = ip;
7902         lro->tcph = tcp;
7903         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7904         lro->tcp_ack = ntohl(tcp->ack_seq);
7905         lro->sg_num = 1;
7906         lro->total_len = ntohs(ip->tot_len);
7907         lro->frags_len = 0;
7908         /*
7909          * check if we saw TCP timestamp. Other consistency checks have
7910          * already been done.
7911          */
7912         if (tcp->doff == 8) {
7913                 u32 *ptr;
7914                 ptr = (u32 *)(tcp+1);
7915                 lro->saw_ts = 1;
7916                 lro->cur_tsval = *(ptr+1);
7917                 lro->cur_tsecr = *(ptr+2);
7918         }
7919         lro->in_use = 1;
7920 }
7921
7922 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7923 {
7924         struct iphdr *ip = lro->iph;
7925         struct tcphdr *tcp = lro->tcph;
7926         __sum16 nchk;
7927         struct stat_block *statinfo = sp->mac_control.stats_info;
7928         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7929
7930         /* Update L3 header */
7931         ip->tot_len = htons(lro->total_len);
7932         ip->check = 0;
7933         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7934         ip->check = nchk;
7935
7936         /* Update L4 header */
7937         tcp->ack_seq = lro->tcp_ack;
7938         tcp->window = lro->window;
7939
7940         /* Update tsecr field if this session has timestamps enabled */
7941         if (lro->saw_ts) {
7942                 u32 *ptr = (u32 *)(tcp + 1);
7943                 *(ptr+2) = lro->cur_tsecr;
7944         }
7945
7946         /* Update counters required for calculation of
7947          * average no. of packets aggregated.
7948          */
7949         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7950         statinfo->sw_stat.num_aggregations++;
7951 }
7952
7953 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7954                 struct tcphdr *tcp, u32 l4_pyld)
7955 {
7956         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7957         lro->total_len += l4_pyld;
7958         lro->frags_len += l4_pyld;
7959         lro->tcp_next_seq += l4_pyld;
7960         lro->sg_num++;
7961
7962         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7963         lro->tcp_ack = tcp->ack_seq;
7964         lro->window = tcp->window;
7965
7966         if (lro->saw_ts) {
7967                 u32 *ptr;
7968                 /* Update tsecr and tsval from this packet */
7969                 ptr = (u32 *) (tcp + 1);
7970                 lro->cur_tsval = *(ptr + 1);
7971                 lro->cur_tsecr = *(ptr + 2);
7972         }
7973 }
7974
/*
 * verify_l3_l4_lro_capable - check whether a TCP segment's L3/L4 headers
 * permit merging it into an LRO session.
 * @l_lro: existing session, or NULL when testing the first packet of a flow.
 * Returns 0 when the segment is aggregatable, -1 when it must be passed
 * up unmerged (and/or the session flushed).
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
                                    struct tcphdr *tcp, u32 tcp_pyld_len)
{
        u8 *ptr;

        DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

        if (!tcp_pyld_len) {
                /* Runt frame or a pure ack */
                return -1;
        }

        if (ip->ihl != 5) /* IP has options */
                return -1;

        /* If we see CE codepoint in IP header, packet is not mergeable */
        if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
                return -1;

        /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
        if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
                                    tcp->ece || tcp->cwr || !tcp->ack) {
                /*
                 * Currently recognize only the ack control word and
                 * any other control field being set would result in
                 * flushing the LRO session
                 */
                return -1;
        }

        /*
         * Allow only one TCP timestamp option. Don't aggregate if
         * any other options are detected.
         * doff == 5 means no options; doff == 8 leaves exactly the 12
         * bytes a (possibly NOP-padded) timestamp option occupies.
         */
        if (tcp->doff != 5 && tcp->doff != 8)
                return -1;

        if (tcp->doff == 8) {
                ptr = (u8 *)(tcp + 1);
                /* NOTE(review): this scan assumes a well-formed option
                 * area; ptr is not bounded against doff — confirm the
                 * hardware guarantees option sanity before relying on it. */
                while (*ptr == TCPOPT_NOP)
                        ptr++;
                if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
                        return -1;

                /* Ensure timestamp value increases monotonically */
                /* NOTE(review): tsval words are compared without ntohl(),
                 * i.e. in network byte order — verify this is intended on
                 * little-endian hosts. */
                if (l_lro)
                        if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
                                return -1;

                /* timestamp echo reply should be non-zero */
                if (*((u32 *)(ptr+6)) == 0)
                        return -1;
        }

        return 0;
}
8031
/*
 * s2io_club_tcp_session - classify a received packet for LRO.
 *
 * Parses the L2/L3/L4 headers of @buffer and matches the flow against
 * the per-adapter LRO session table (sp->lro0_n).  On success *tcp and
 * *tcp_len describe the TCP header and payload length, and *lro points
 * at the session involved (when one applies).
 *
 * Return value drives the caller:
 *   0 - all LRO sessions already in use; *lro is set to NULL
 *   1 - packet was aggregated into an existing session
 *   2 - existing session must be flushed (out-of-order segment, or a
 *       segment that fails the L3/L4 merge checks); headers updated
 *   3 - a new LRO session was initiated for this flow
 *   4 - packet aggregated and the session reached lro_max_aggr_per_sess;
 *       caller must flush it
 *   5 - packet is not L3/L4 aggregatable; send it up unmodified
 * A non-zero result from check_L2_lro_capable() is returned unchanged.
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
                      struct RxD_t *rxdp, struct s2io_nic *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        int ret = 0, i;

        /* Bail out early if the frame is not even L2-eligible for LRO */
        if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                         rxdp))) {
                DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
                          ip->saddr, ip->daddr);
        } else {
                return ret;
        }

        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        /* First pass: look for an in-use session matching this socket pair */
        for (i=0; i<MAX_LRO_SESSIONS; i++) {
                struct lro *l_lro = &sp->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        /* An out-of-order segment forces a flush */
                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
                                          "0x%x, actual 0x%x\n", __FUNCTION__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                sp->mac_control.stats_info->
                                   sw_stat.outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
                        return 5;
                }

                /* Second pass: claim a free session slot for this flow */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *l_lro = &sp->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
                          __FUNCTION__);
                *lro = NULL;
                return ret;
        }

        switch (ret) {
                case 3:
                        initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
                        break;
                case 2:
                        /* Finalize headers before the caller flushes */
                        update_L3L4_header(sp, *lro);
                        break;
                case 1:
                        aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                        /* Cap reached: finalize and ask caller to flush */
                        if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                                update_L3L4_header(sp, *lro);
                                ret = 4; /* Flush the LRO */
                        }
                        break;
                default:
                        DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
                                __FUNCTION__);
                        break;
        }

        return ret;
}
8127
8128 static void clear_lro_session(struct lro *lro)
8129 {
8130         static u16 lro_struct_size = sizeof(struct lro);
8131
8132         memset(lro, 0, lro_struct_size);
8133 }
8134
8135 static void queue_rx_frame(struct sk_buff *skb)
8136 {
8137         struct net_device *dev = skb->dev;
8138
8139         skb->protocol = eth_type_trans(skb, dev);
8140         if (napi)
8141                 netif_receive_skb(skb);
8142         else
8143                 netif_rx(skb);
8144 }
8145
8146 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8147                            struct sk_buff *skb,
8148                            u32 tcp_len)
8149 {
8150         struct sk_buff *first = lro->parent;
8151
8152         first->len += tcp_len;
8153         first->data_len = lro->frags_len;
8154         skb_pull(skb, (skb->len - tcp_len));
8155         if (skb_shinfo(first)->frag_list)
8156                 lro->last_frag->next = skb;
8157         else
8158                 skb_shinfo(first)->frag_list = skb;
8159         first->truesize += skb->truesize;
8160         lro->last_frag = skb;
8161         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8162         return;
8163 }
8164
8165 /**
8166  * s2io_io_error_detected - called when PCI error is detected
8167  * @pdev: Pointer to PCI device
8168  * @state: The current pci connection state
8169  *
8170  * This function is called after a PCI bus error affecting
8171  * this device has been detected.
8172  */
8173 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8174                                                pci_channel_state_t state)
8175 {
8176         struct net_device *netdev = pci_get_drvdata(pdev);
8177         struct s2io_nic *sp = netdev->priv;
8178
8179         netif_device_detach(netdev);
8180
8181         if (netif_running(netdev)) {
8182                 /* Bring down the card, while avoiding PCI I/O */
8183                 do_s2io_card_down(sp, 0);
8184         }
8185         pci_disable_device(pdev);
8186
8187         return PCI_ERS_RESULT_NEED_RESET;
8188 }
8189
8190 /**
8191  * s2io_io_slot_reset - called after the pci bus has been reset.
8192  * @pdev: Pointer to PCI device
8193  *
8194  * Restart the card from scratch, as if from a cold-boot.
8195  * At this point, the card has exprienced a hard reset,
8196  * followed by fixups by BIOS, and has its config space
8197  * set up identically to what it was at cold boot.
8198  */
8199 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8200 {
8201         struct net_device *netdev = pci_get_drvdata(pdev);
8202         struct s2io_nic *sp = netdev->priv;
8203
8204         if (pci_enable_device(pdev)) {
8205                 printk(KERN_ERR "s2io: "
8206                        "Cannot re-enable PCI device after reset.\n");
8207                 return PCI_ERS_RESULT_DISCONNECT;
8208         }
8209
8210         pci_set_master(pdev);
8211         s2io_reset(sp);
8212
8213         return PCI_ERS_RESULT_RECOVERED;
8214 }
8215
8216 /**
8217  * s2io_io_resume - called when traffic can start flowing again.
8218  * @pdev: Pointer to PCI device
8219  *
8220  * This callback is called when the error recovery driver tells
8221  * us that its OK to resume normal operation.
8222  */
8223 static void s2io_io_resume(struct pci_dev *pdev)
8224 {
8225         struct net_device *netdev = pci_get_drvdata(pdev);
8226         struct s2io_nic *sp = netdev->priv;
8227
8228         if (netif_running(netdev)) {
8229                 if (s2io_card_up(sp)) {
8230                         printk(KERN_ERR "s2io: "
8231                                "Can't bring device back up after reset.\n");
8232                         return;
8233                 }
8234
8235                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8236                         s2io_card_down(sp);
8237                         printk(KERN_ERR "s2io: "
8238                                "Can't resetore mac addr after reset.\n");
8239                         return;
8240                 }
8241         }
8242
8243         netif_device_attach(netdev);
8244         netif_wake_queue(netdev);
8245 }