/*
 * [PATCH] S2io: MSI/MSI-X support (runtime configurable)
 * drivers/net/s2io.c
 */
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regaring the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explaination of all the variables.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_sz: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
59
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
62 #include <asm/io.h>
63
64 /* local include */
65 #include "s2io.h"
66 #include "s2io-regs.h"
67
/* S2io Driver name & version strings (used for identification in logs
 * and ethtool driver info). */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = "Version 2.0.9.1";
71
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73 {
74         int ret;
75
76         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79         return ret;
80 }
81
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Arguments and the full expansion are parenthesized so the macro is
 * safe to embed in any expression.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)              \
        (((dev_type) == XFRAME_I_DEVICE) ?                              \
                (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||       \
                  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up when neither remote nor local RMAC fault is reported. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                        ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the rx tasklet; non-zero return means already in use. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer replenish urgency levels returned by rx_buffer_level(). */
#define PANIC   1
#define LOW     2
97 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98 {
99         int level = 0;
100         mac_info_t *mac_control;
101
102         mac_control = &sp->mac_control;
103         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
104                 level = LOW;
105                 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
106                         level = PANIC;
107                 }
108         }
109
110         return level;
111 }
112
/* Ethtool related variables and Macros. */
/* Names of the self-test suites exposed through ethtool; the tag in
 * parentheses records whether each test can run with the link up. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
121
/* Labels for the ethtool statistics counters — presumably in the same
 * order the counter values are exported (verify against the driver's
 * get_ethtool_stats handler).  Entries after the "DRIVER STATISTICS"
 * banner are software counters rather than hardware MAC counters. */
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_any_err_frms"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_discarded_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_pause_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"\n DRIVER STATISTICS"},
        {"single_bit_ecc_errs"},
        {"double_bit_ecc_errs"},
};
166
/* Count of stats strings and the total bytes they occupy.  Expansions
 * are parenthesized so the macros compose safely in larger expressions. */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

/* Count of self-test strings and the total bytes they occupy. */
#define S2IO_TEST_LEN   (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)

/*
 * Initialise @timer to invoke @handle(@arg) after @exp jiffies.
 * Wrapped in do { } while (0) so the macro behaves as one statement
 * even inside an unbraced if/else.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&timer);                             \
                timer.function = handle;                        \
                timer.data = (unsigned long) (arg);             \
                mod_timer(&timer, (jiffies + (exp)));           \
        } while (0)
179 /* Add the vlan */
180 static void s2io_vlan_rx_register(struct net_device *dev,
181                                         struct vlan_group *grp)
182 {
183         nic_t *nic = dev->priv;
184         unsigned long flags;
185
186         spin_lock_irqsave(&nic->tx_lock, flags);
187         nic->vlgrp = grp;
188         spin_unlock_irqrestore(&nic->tx_lock, flags);
189 }
190
191 /* Unregister the vlan */
192 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193 {
194         nic_t *nic = dev->priv;
195         unsigned long flags;
196
197         spin_lock_irqsave(&nic->tx_lock, flags);
198         if (nic->vlgrp)
199                 nic->vlgrp->vlan_devices[vid] = NULL;
200         spin_unlock_irqrestore(&nic->tx_lock, flags);
201 }
202
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 * Each table below is a flat sequence of 64-bit register values,
 * terminated by END_SIGN; SWITCH_SIGN separates phases within a table.
 */

#define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
#define END_SIGN        0x0

/* DTX configuration sequence for Herc (Xframe II) adapters: pairs of
 * set-address / write-data values, END_SIGN terminated. */
static u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};
231
/* MDIO sequence for Xena adapters: pulses the PMA PLL through reset
 * and release, END_SIGN terminated. */
static u64 xena_mdio_cfg[] = {
        /* Reset PMA PLL */
        0xC001010000000000ULL, 0xC0010100000000E0ULL,
        0xC0010100008000E4ULL,
        /* Remove Reset from PMA PLL */
        0xC001010000000000ULL, 0xC0010100000000E0ULL,
        0xC0010100000000E4ULL,
        END_SIGN
};
241
/* DTX configuration sequence for Xena (Xframe I) adapters.  SWITCH_SIGN
 * splits the table into a set-PADLOOPBACKN phase and a remove phase;
 * END_SIGN terminates it. */
static u64 xena_dtx_cfg[] = {
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        0x80000515D93500E4ULL, 0x8001051500000000ULL,
        0x80010515000000E0ULL, 0x80010515001E00E4ULL,
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        0x80020515F21000E4ULL,
        /* Set PADLOOPBACKN */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        0x80020515B20000E4ULL, 0x8003051500000000ULL,
        0x80030515000000E0ULL, 0x80030515B20000E4ULL,
        0x8004051500000000ULL, 0x80040515000000E0ULL,
        0x80040515B20000E4ULL, 0x8005051500000000ULL,
        0x80050515000000E0ULL, 0x80050515B20000E4ULL,
        SWITCH_SIGN,
        /* Remove PADLOOPBACKN */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        0x80020515F20000E4ULL, 0x8003051500000000ULL,
        0x80030515000000E0ULL, 0x80030515F20000E4ULL,
        0x8004051500000000ULL, 0x80040515000000E0ULL,
        0x80040515F20000E4ULL, 0x8005051500000000ULL,
        0x80050515000000E0ULL, 0x80050515F20000E4ULL,
        END_SIGN
};
265
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  Register-value sequence played back verbatim,
 * END_SIGN terminated.
 */
static u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
287
/* Module Loadable parameters.  See the block comment at the top of the
 * file for a description of the ring/fifo parameters. */
static unsigned int tx_fifo_num = 1;            /* number of Tx FIFOs in use */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] = /* per-FIFO descriptor count */
    {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
static unsigned int rx_ring_num = 1;            /* number of Rx rings in use */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =  /* per-ring descriptor count */
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int rts_frm_len[MAX_RX_RINGS] = /* per-ring steering frame len */
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int use_continuous_tx_intrs = 1;
static unsigned int rmac_pause_time = 65535;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
static unsigned int bimodal = 0;
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;
#endif
/* Frequency of Rx desc syncs expressed as power of 2 */
static unsigned int rxsync_frequency = 3;
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
static unsigned int intr_type = 0;
312
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports:
 * Xframe I (WIN/UNI) and Herc (WIN/UNI) variants, matched on
 * vendor/device id with any subsystem ids.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}                            /* terminating entry */
};
328
MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI driver glue: binds s2io_init_nic()/s2io_rem_nic() to the device
 * ids listed in s2io_tbl above. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
};
337
/* A simplifier macro used both by init and free shared_mem Fns().
 * Ceiling division: number of pages needed to hold @len entries at
 * @per_each entries per page.  Arguments are parenthesized so that
 * expression arguments (e.g. `a + b`) evaluate correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
340
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS, or FAILURE on invalid configuration, or -ENOMEM on
 * allocation failure.  On any failure the caller is expected to invoke
 * free_shared_mem() to release whatever was allocated before the error.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
        u32 size;
        void *tmp_v_addr, *tmp_v_addr_next;
        dma_addr_t tmp_p_addr, tmp_p_addr_next;
        RxD_block_t *pre_rxd_blk = NULL;
        int i, j, blk_cnt, rx_sz, tx_sz;
        int lst_size, lst_per_page;
        struct net_device *dev = nic->dev;
#ifdef CONFIG_2BUFF_MODE
        unsigned long tmp;
        buffAdd_t *ba;
#endif

        mac_info_t *mac_control;
        struct config_param *config;

        mac_control = &nic->mac_control;
        config = &nic->config;


        /* Allocation and initialization of TXDLs in FIOFs */
        /* First validate the total TxD count across all FIFOs. */
        size = 0;
        for (i = 0; i < config->tx_fifo_num; i++) {
                size += config->tx_cfg[i].fifo_len;
        }
        if (size > MAX_AVAILABLE_TXDS) {
                DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
                          __FUNCTION__);
                DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
                return FAILURE;
        }

        /* One TxD list (lst_size bytes) is carved per FIFO entry;
         * lst_per_page lists fit in one DMA-consistent page. */
        lst_size = (sizeof(TxD_t) * config->max_txds);
        tx_sz = lst_size * size;
        lst_per_page = PAGE_SIZE / lst_size;

        /* Per-FIFO bookkeeping array: one list_info_hold_t per TxD list. */
        for (i = 0; i < config->tx_fifo_num; i++) {
                int fifo_len = config->tx_cfg[i].fifo_len;
                int list_holder_size = fifo_len * sizeof(list_info_hold_t);
                mac_control->fifos[i].list_info = kmalloc(list_holder_size,
                                                          GFP_KERNEL);
                if (!mac_control->fifos[i].list_info) {
                        DBG_PRINT(ERR_DBG,
                                  "Malloc failed for list_info\n");
                        return -ENOMEM;
                }
                memset(mac_control->fifos[i].list_info, 0, list_holder_size);
        }
        /* Initialise FIFO state and back each FIFO's TxD lists with
         * DMA-consistent pages. */
        for (i = 0; i < config->tx_fifo_num; i++) {
                int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                                lst_per_page);
                mac_control->fifos[i].tx_curr_put_info.offset = 0;
                mac_control->fifos[i].tx_curr_put_info.fifo_len =
                    config->tx_cfg[i].fifo_len - 1;
                mac_control->fifos[i].tx_curr_get_info.offset = 0;
                mac_control->fifos[i].tx_curr_get_info.fifo_len =
                    config->tx_cfg[i].fifo_len - 1;
                mac_control->fifos[i].fifo_no = i;
                mac_control->fifos[i].nic = nic;
                mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;

                for (j = 0; j < page_num; j++) {
                        int k = 0;
                        dma_addr_t tmp_p;
                        void *tmp_v;
                        tmp_v = pci_alloc_consistent(nic->pdev,
                                                     PAGE_SIZE, &tmp_p);
                        if (!tmp_v) {
                                DBG_PRINT(ERR_DBG,
                                          "pci_alloc_consistent ");
                                DBG_PRINT(ERR_DBG, "failed for TxDL\n");
                                return -ENOMEM;
                        }
                        /* If we got a zero DMA address(can happen on
                         * certain platforms like PPC), reallocate.
                         * Store virtual address of page we don't want,
                         * to be freed later.
                         */
                        if (!tmp_p) {
                                mac_control->zerodma_virt_addr = tmp_v;
                                DBG_PRINT(INIT_DBG, 
                                "%s: Zero DMA address for TxDL. ", dev->name);
                                DBG_PRINT(INIT_DBG, 
                                "Virtual address %p\n", tmp_v);
                                tmp_v = pci_alloc_consistent(nic->pdev,
                                                     PAGE_SIZE, &tmp_p);
                                if (!tmp_v) {
                                        DBG_PRINT(ERR_DBG,
                                          "pci_alloc_consistent ");
                                        DBG_PRINT(ERR_DBG, "failed for TxDL\n");
                                        return -ENOMEM;
                                }
                        }
                        /* Carve the page into TxD lists and record each
                         * list's virtual and bus address. */
                        while (k < lst_per_page) {
                                int l = (j * lst_per_page) + k;
                                if (l == config->tx_cfg[i].fifo_len)
                                        break;
                                mac_control->fifos[i].list_info[l].list_virt_addr =
                                    tmp_v + (k * lst_size);
                                mac_control->fifos[i].list_info[l].list_phy_addr =
                                    tmp_p + (k * lst_size);
                                k++;
                        }
                }
        }

        /* Allocation and initialization of RXDs in Rings */
        size = 0;
        for (i = 0; i < config->rx_ring_num; i++) {
                /* RxDs are consumed a block at a time; each ring's count
                 * must be a whole number of blocks. */
                if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
                        DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
                        DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
                                  i);
                        DBG_PRINT(ERR_DBG, "RxDs per Block");
                        return FAILURE;
                }
                size += config->rx_cfg[i].num_rxd;
                mac_control->rings[i].block_count =
                    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
                /* One RxD per block is reserved as the block-link marker,
                 * hence pkt_cnt excludes block_count descriptors. */
                mac_control->rings[i].pkt_cnt =
                    config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
        }
        size = (size * (sizeof(RxD_t)));
        rx_sz = size;

        for (i = 0; i < config->rx_ring_num; i++) {
                mac_control->rings[i].rx_curr_get_info.block_index = 0;
                mac_control->rings[i].rx_curr_get_info.offset = 0;
                mac_control->rings[i].rx_curr_get_info.ring_len =
                    config->rx_cfg[i].num_rxd - 1;
                mac_control->rings[i].rx_curr_put_info.block_index = 0;
                mac_control->rings[i].rx_curr_put_info.offset = 0;
                mac_control->rings[i].rx_curr_put_info.ring_len =
                    config->rx_cfg[i].num_rxd - 1;
                mac_control->rings[i].nic = nic;
                mac_control->rings[i].ring_no = i;

                blk_cnt =
                    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
                /*  Allocating all the Rx blocks */
                for (j = 0; j < blk_cnt; j++) {
#ifndef CONFIG_2BUFF_MODE
                        size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
#else
                        size = SIZE_OF_BLOCK;
#endif
                        tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
                                                          &tmp_p_addr);
                        if (tmp_v_addr == NULL) {
                                /*
                                 * In case of failure, free_shared_mem()
                                 * is called, which should free any
                                 * memory that was alloced till the
                                 * failure happened.
                                 */
                                mac_control->rings[i].rx_blocks[j].block_virt_addr =
                                    tmp_v_addr;
                                return -ENOMEM;
                        }
                        memset(tmp_v_addr, 0, size);
                        mac_control->rings[i].rx_blocks[j].block_virt_addr =
                                tmp_v_addr;
                        mac_control->rings[i].rx_blocks[j].block_dma_addr =
                                tmp_p_addr;
                }
                /* Interlinking all Rx Blocks into a circular chain: each
                 * block's trailer points at the next block (modulo blk_cnt). */
                for (j = 0; j < blk_cnt; j++) {
                        tmp_v_addr =
                                mac_control->rings[i].rx_blocks[j].block_virt_addr;
                        tmp_v_addr_next =
                                mac_control->rings[i].rx_blocks[(j + 1) %
                                              blk_cnt].block_virt_addr;
                        tmp_p_addr =
                                mac_control->rings[i].rx_blocks[j].block_dma_addr;
                        tmp_p_addr_next =
                                mac_control->rings[i].rx_blocks[(j + 1) %
                                              blk_cnt].block_dma_addr;

                        pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
                        pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
                                                                 * marker.
                                                                 */
#ifndef CONFIG_2BUFF_MODE
                        pre_rxd_blk->reserved_2_pNext_RxD_block =
                            (unsigned long) tmp_v_addr_next;
#endif
                        pre_rxd_blk->pNext_RxD_Blk_physical =
                            (u64) tmp_p_addr_next;
                }
        }

#ifdef CONFIG_2BUFF_MODE
        /*
         * Allocation of Storages for buffer addresses in 2BUFF mode
         * and the buffers as well.
         */
        for (i = 0; i < config->rx_ring_num; i++) {
                blk_cnt =
                    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
                mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
                                     GFP_KERNEL);
                if (!mac_control->rings[i].ba)
                        return -ENOMEM;
                for (j = 0; j < blk_cnt; j++) {
                        int k = 0;
                        mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
                                                 (MAX_RXDS_PER_BLOCK + 1)),
                                                GFP_KERNEL);
                        if (!mac_control->rings[i].ba[j])
                                return -ENOMEM;
                        while (k != MAX_RXDS_PER_BLOCK) {
                                ba = &mac_control->rings[i].ba[j][k];

                                /* Over-allocate by ALIGN_SIZE and round the
                                 * usable pointer up to the alignment boundary;
                                 * the original pointer is kept for kfree(). */
                                ba->ba_0_org = (void *) kmalloc
                                    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
                                if (!ba->ba_0_org)
                                        return -ENOMEM;
                                tmp = (unsigned long) ba->ba_0_org;
                                tmp += ALIGN_SIZE;
                                tmp &= ~((unsigned long) ALIGN_SIZE);
                                ba->ba_0 = (void *) tmp;

                                ba->ba_1_org = (void *) kmalloc
                                    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
                                if (!ba->ba_1_org)
                                        return -ENOMEM;
                                tmp = (unsigned long) ba->ba_1_org;
                                tmp += ALIGN_SIZE;
                                tmp &= ~((unsigned long) ALIGN_SIZE);
                                ba->ba_1 = (void *) tmp;
                                k++;
                        }
                }
        }
#endif

        /* Allocation and initialization of Statistics block */
        size = sizeof(StatInfo_t);
        mac_control->stats_mem = pci_alloc_consistent
            (nic->pdev, size, &mac_control->stats_mem_phy);

        if (!mac_control->stats_mem) {
                /*
                 * In case of failure, free_shared_mem() is called, which
                 * should free any memory that was alloced till the
                 * failure happened.
                 */
                return -ENOMEM;
        }
        mac_control->stats_mem_sz = size;

        tmp_v_addr = mac_control->stats_mem;
        mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
        memset(tmp_v_addr, 0, size);
        /* NOTE(review): tmp_p_addr here still holds the last Rx block's
         * DMA address from the interlink loop above, not the stats
         * block's address — the printed value looks misleading. */
        DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
                  (unsigned long long) tmp_p_addr);

        return SUCCESS;
}
609
/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * Safe to call after a partially failed init_shared_mem(): it stops at
 * the first unallocated entry in each area.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
        int i, j, blk_cnt, size;
        void *tmp_v_addr;
        dma_addr_t tmp_p_addr;
        mac_info_t *mac_control;
        struct config_param *config;
        int lst_size, lst_per_page;
        struct net_device *dev = nic->dev;

        if (!nic)
                return;

        mac_control = &nic->mac_control;
        config = &nic->config;

        lst_size = (sizeof(TxD_t) * config->max_txds);
        lst_per_page = PAGE_SIZE / lst_size;

        /* Free the DMA pages backing each FIFO's TxD lists (one page per
         * lst_per_page lists; only the first list of a page holds the
         * page's base addresses). */
        for (i = 0; i < config->tx_fifo_num; i++) {
                int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                                lst_per_page);
                for (j = 0; j < page_num; j++) {
                        int mem_blks = (j * lst_per_page);
                        /* NOTE(review): a NULL list_info aborts the whole
                         * function, skipping later fifos and the Rx/stats
                         * areas; safe only because list_info allocation is
                         * the very first step of init_shared_mem() — confirm
                         * if that ordering ever changes. */
                        if (!mac_control->fifos[i].list_info)
                                return; 
                        if (!mac_control->fifos[i].list_info[mem_blks].
                                 list_virt_addr)
                                break;
                        pci_free_consistent(nic->pdev, PAGE_SIZE,
                                            mac_control->fifos[i].
                                            list_info[mem_blks].
                                            list_virt_addr,
                                            mac_control->fifos[i].
                                            list_info[mem_blks].
                                            list_phy_addr);
                }
                /* If we got a zero DMA address during allocation,
                 * free the page now
                 */
                if (mac_control->zerodma_virt_addr) {
                        pci_free_consistent(nic->pdev, PAGE_SIZE,
                                            mac_control->zerodma_virt_addr,
                                            (dma_addr_t)0);
                        DBG_PRINT(INIT_DBG, 
                                "%s: Freeing TxDL with zero DMA addr. ",
                                dev->name);
                        DBG_PRINT(INIT_DBG, "Virtual address %p\n",
                                mac_control->zerodma_virt_addr);
                }
                kfree(mac_control->fifos[i].list_info);
        }

#ifndef CONFIG_2BUFF_MODE
        size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
#else
        size = SIZE_OF_BLOCK;
#endif
        /* Free every Rx block of every ring; a NULL virt addr marks the
         * point where a failed init stopped allocating. */
        for (i = 0; i < config->rx_ring_num; i++) {
                blk_cnt = mac_control->rings[i].block_count;
                for (j = 0; j < blk_cnt; j++) {
                        tmp_v_addr = mac_control->rings[i].rx_blocks[j].
                                block_virt_addr;
                        tmp_p_addr = mac_control->rings[i].rx_blocks[j].
                                block_dma_addr;
                        if (tmp_v_addr == NULL)
                                break;
                        pci_free_consistent(nic->pdev, size,
                                            tmp_v_addr, tmp_p_addr);
                }
        }

#ifdef CONFIG_2BUFF_MODE
        /* Freeing buffer storage addresses in 2BUFF mode. */
        for (i = 0; i < config->rx_ring_num; i++) {
                blk_cnt =
                    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
                for (j = 0; j < blk_cnt; j++) {
                        int k = 0;
                        if (!mac_control->rings[i].ba[j])
                                continue;
                        while (k != MAX_RXDS_PER_BLOCK) {
                                buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
                                /* Free the original (unaligned) pointers,
                                 * not the aligned ba_0/ba_1 views. */
                                kfree(ba->ba_0_org);
                                kfree(ba->ba_1_org);
                                k++;
                        }
                        kfree(mac_control->rings[i].ba[j]);
                }
                if (mac_control->rings[i].ba)
                        kfree(mac_control->rings[i].ba);
        }
#endif

        /* Statistics block is freed last. */
        if (mac_control->stats_mem) {
                pci_free_consistent(nic->pdev,
                                    mac_control->stats_mem_sz,
                                    mac_control->stats_mem,
                                    mac_control->stats_mem_phy);
        }
}
718
719 /**
720  * s2io_verify_pci_mode -
721  */
722
723 static int s2io_verify_pci_mode(nic_t *nic)
724 {
725         XENA_dev_config_t __iomem *bar0 = nic->bar0;
726         register u64 val64 = 0;
727         int     mode;
728
729         val64 = readq(&bar0->pci_mode);
730         mode = (u8)GET_PCI_MODE(val64);
731
732         if ( val64 & PCI_MODE_UNKNOWN_MODE)
733                 return -1;      /* Unknown PCI mode */
734         return mode;
735 }
736
737
738 /**
739  * s2io_print_pci_mode -
740  */
741 static int s2io_print_pci_mode(nic_t *nic)
742 {
743         XENA_dev_config_t __iomem *bar0 = nic->bar0;
744         register u64 val64 = 0;
745         int     mode;
746         struct config_param *config = &nic->config;
747
748         val64 = readq(&bar0->pci_mode);
749         mode = (u8)GET_PCI_MODE(val64);
750
751         if ( val64 & PCI_MODE_UNKNOWN_MODE)
752                 return -1;      /* Unknown PCI mode */
753
754         if (val64 & PCI_MODE_32_BITS) {
755                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
756         } else {
757                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
758         }
759
760         switch(mode) {
761                 case PCI_MODE_PCI_33:
762                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
763                         config->bus_speed = 33;
764                         break;
765                 case PCI_MODE_PCI_66:
766                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
767                         config->bus_speed = 133;
768                         break;
769                 case PCI_MODE_PCIX_M1_66:
770                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
771                         config->bus_speed = 133; /* Herc doubles the clock rate */
772                         break;
773                 case PCI_MODE_PCIX_M1_100:
774                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
775                         config->bus_speed = 200;
776                         break;
777                 case PCI_MODE_PCIX_M1_133:
778                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
779                         config->bus_speed = 266;
780                         break;
781                 case PCI_MODE_PCIX_M2_66:
782                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
783                         config->bus_speed = 133;
784                         break;
785                 case PCI_MODE_PCIX_M2_100:
786                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
787                         config->bus_speed = 200;
788                         break;
789                 case PCI_MODE_PCIX_M2_133:
790                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
791                         config->bus_speed = 266;
792                         break;
793                 default:
794                         return -1;      /* Unsupported bus speed */
795         }
796
797         return mode;
798 }
799
800 /**
801  *  init_nic - Initialization of hardware
802  *  @nic: device private variable
803  *  Description: The function sequentially configures every block
804  *  of the H/W from their reset values.
805  *  Return Value:  SUCCESS on success and
806  *  '-1' on failure (endian settings incorrect).
807  */
808
809 static int init_nic(struct s2io_nic *nic)
810 {
811         XENA_dev_config_t __iomem *bar0 = nic->bar0;
812         struct net_device *dev = nic->dev;
813         register u64 val64 = 0;
814         void __iomem *add;
815         u32 time;
816         int i, j;
817         mac_info_t *mac_control;
818         struct config_param *config;
819         int mdio_cnt = 0, dtx_cnt = 0;
820         unsigned long long mem_share;
821         int mem_size;
822
823         mac_control = &nic->mac_control;
824         config = &nic->config;
825
826         /* to set the swapper controle on the card */
827         if(s2io_set_swapper(nic)) {
828                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
829                 return -1;
830         }
831
832         /*
833          * Herc requires EOI to be removed from reset before XGXS, so..
834          */
835         if (nic->device_type & XFRAME_II_DEVICE) {
836                 val64 = 0xA500000000ULL;
837                 writeq(val64, &bar0->sw_reset);
838                 msleep(500);
839                 val64 = readq(&bar0->sw_reset);
840         }
841
842         /* Remove XGXS from reset state */
843         val64 = 0;
844         writeq(val64, &bar0->sw_reset);
845         msleep(500);
846         val64 = readq(&bar0->sw_reset);
847
848         /*  Enable Receiving broadcasts */
849         add = &bar0->mac_cfg;
850         val64 = readq(&bar0->mac_cfg);
851         val64 |= MAC_RMAC_BCAST_ENABLE;
852         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
853         writel((u32) val64, add);
854         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
855         writel((u32) (val64 >> 32), (add + 4));
856
857         /* Read registers in all blocks */
858         val64 = readq(&bar0->mac_int_mask);
859         val64 = readq(&bar0->mc_int_mask);
860         val64 = readq(&bar0->xgxs_int_mask);
861
862         /*  Set MTU */
863         val64 = dev->mtu;
864         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
865
866         /*
867          * Configuring the XAUI Interface of Xena.
868          * ***************************************
869          * To Configure the Xena's XAUI, one has to write a series
870          * of 64 bit values into two registers in a particular
871          * sequence. Hence a macro 'SWITCH_SIGN' has been defined
872          * which will be defined in the array of configuration values
873          * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
874          * to switch writing from one regsiter to another. We continue
875          * writing these values until we encounter the 'END_SIGN' macro.
876          * For example, After making a series of 21 writes into
877          * dtx_control register the 'SWITCH_SIGN' appears and hence we
878          * start writing into mdio_control until we encounter END_SIGN.
879          */
880         if (nic->device_type & XFRAME_II_DEVICE) {
881                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
882                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
883                                           &bar0->dtx_control, UF);
884                         if (dtx_cnt & 0x1)
885                                 msleep(1); /* Necessary!! */
886                         dtx_cnt++;
887                 }
888         } else {
889                 while (1) {
890                       dtx_cfg:
891                         while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
892                                 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
893                                         dtx_cnt++;
894                                         goto mdio_cfg;
895                                 }
896                                 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
897                                                   &bar0->dtx_control, UF);
898                                 val64 = readq(&bar0->dtx_control);
899                                 dtx_cnt++;
900                         }
901                       mdio_cfg:
902                         while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
903                                 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
904                                         mdio_cnt++;
905                                         goto dtx_cfg;
906                                 }
907                                 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
908                                                   &bar0->mdio_control, UF);
909                                 val64 = readq(&bar0->mdio_control);
910                                 mdio_cnt++;
911                         }
912                         if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
913                             (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
914                                 break;
915                         } else {
916                                 goto dtx_cfg;
917                         }
918                 }
919         }
920
921         /*  Tx DMA Initialization */
922         val64 = 0;
923         writeq(val64, &bar0->tx_fifo_partition_0);
924         writeq(val64, &bar0->tx_fifo_partition_1);
925         writeq(val64, &bar0->tx_fifo_partition_2);
926         writeq(val64, &bar0->tx_fifo_partition_3);
927
928
929         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
930                 val64 |=
931                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
932                          13) | vBIT(config->tx_cfg[i].fifo_priority,
933                                     ((i * 32) + 5), 3);
934
935                 if (i == (config->tx_fifo_num - 1)) {
936                         if (i % 2 == 0)
937                                 i++;
938                 }
939
940                 switch (i) {
941                 case 1:
942                         writeq(val64, &bar0->tx_fifo_partition_0);
943                         val64 = 0;
944                         break;
945                 case 3:
946                         writeq(val64, &bar0->tx_fifo_partition_1);
947                         val64 = 0;
948                         break;
949                 case 5:
950                         writeq(val64, &bar0->tx_fifo_partition_2);
951                         val64 = 0;
952                         break;
953                 case 7:
954                         writeq(val64, &bar0->tx_fifo_partition_3);
955                         break;
956                 }
957         }
958
959         /* Enable Tx FIFO partition 0. */
960         val64 = readq(&bar0->tx_fifo_partition_0);
961         val64 |= BIT(0);        /* To enable the FIFO partition. */
962         writeq(val64, &bar0->tx_fifo_partition_0);
963
964         /*
965          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
966          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
967          */
968         if ((nic->device_type == XFRAME_I_DEVICE) &&
969                 (get_xena_rev_id(nic->pdev) < 4))
970                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
971
972         val64 = readq(&bar0->tx_fifo_partition_0);
973         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
974                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
975
976         /*
977          * Initialization of Tx_PA_CONFIG register to ignore packet
978          * integrity checking.
979          */
980         val64 = readq(&bar0->tx_pa_cfg);
981         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
982             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
983         writeq(val64, &bar0->tx_pa_cfg);
984
985         /* Rx DMA intialization. */
986         val64 = 0;
987         for (i = 0; i < config->rx_ring_num; i++) {
988                 val64 |=
989                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
990                          3);
991         }
992         writeq(val64, &bar0->rx_queue_priority);
993
994         /*
995          * Allocating equal share of memory to all the
996          * configured Rings.
997          */
998         val64 = 0;
999         if (nic->device_type & XFRAME_II_DEVICE)
1000                 mem_size = 32;
1001         else
1002                 mem_size = 64;
1003
1004         for (i = 0; i < config->rx_ring_num; i++) {
1005                 switch (i) {
1006                 case 0:
1007                         mem_share = (mem_size / config->rx_ring_num +
1008                                      mem_size % config->rx_ring_num);
1009                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1010                         continue;
1011                 case 1:
1012                         mem_share = (mem_size / config->rx_ring_num);
1013                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1014                         continue;
1015                 case 2:
1016                         mem_share = (mem_size / config->rx_ring_num);
1017                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1018                         continue;
1019                 case 3:
1020                         mem_share = (mem_size / config->rx_ring_num);
1021                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1022                         continue;
1023                 case 4:
1024                         mem_share = (mem_size / config->rx_ring_num);
1025                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1026                         continue;
1027                 case 5:
1028                         mem_share = (mem_size / config->rx_ring_num);
1029                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1030                         continue;
1031                 case 6:
1032                         mem_share = (mem_size / config->rx_ring_num);
1033                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1034                         continue;
1035                 case 7:
1036                         mem_share = (mem_size / config->rx_ring_num);
1037                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1038                         continue;
1039                 }
1040         }
1041         writeq(val64, &bar0->rx_queue_cfg);
1042
1043         /*
1044          * Filling Tx round robin registers
1045          * as per the number of FIFOs
1046          */
1047         switch (config->tx_fifo_num) {
1048         case 1:
1049                 val64 = 0x0000000000000000ULL;
1050                 writeq(val64, &bar0->tx_w_round_robin_0);
1051                 writeq(val64, &bar0->tx_w_round_robin_1);
1052                 writeq(val64, &bar0->tx_w_round_robin_2);
1053                 writeq(val64, &bar0->tx_w_round_robin_3);
1054                 writeq(val64, &bar0->tx_w_round_robin_4);
1055                 break;
1056         case 2:
1057                 val64 = 0x0000010000010000ULL;
1058                 writeq(val64, &bar0->tx_w_round_robin_0);
1059                 val64 = 0x0100000100000100ULL;
1060                 writeq(val64, &bar0->tx_w_round_robin_1);
1061                 val64 = 0x0001000001000001ULL;
1062                 writeq(val64, &bar0->tx_w_round_robin_2);
1063                 val64 = 0x0000010000010000ULL;
1064                 writeq(val64, &bar0->tx_w_round_robin_3);
1065                 val64 = 0x0100000000000000ULL;
1066                 writeq(val64, &bar0->tx_w_round_robin_4);
1067                 break;
1068         case 3:
1069                 val64 = 0x0001000102000001ULL;
1070                 writeq(val64, &bar0->tx_w_round_robin_0);
1071                 val64 = 0x0001020000010001ULL;
1072                 writeq(val64, &bar0->tx_w_round_robin_1);
1073                 val64 = 0x0200000100010200ULL;
1074                 writeq(val64, &bar0->tx_w_round_robin_2);
1075                 val64 = 0x0001000102000001ULL;
1076                 writeq(val64, &bar0->tx_w_round_robin_3);
1077                 val64 = 0x0001020000000000ULL;
1078                 writeq(val64, &bar0->tx_w_round_robin_4);
1079                 break;
1080         case 4:
1081                 val64 = 0x0001020300010200ULL;
1082                 writeq(val64, &bar0->tx_w_round_robin_0);
1083                 val64 = 0x0100000102030001ULL;
1084                 writeq(val64, &bar0->tx_w_round_robin_1);
1085                 val64 = 0x0200010000010203ULL;
1086                 writeq(val64, &bar0->tx_w_round_robin_2);
1087                 val64 = 0x0001020001000001ULL;
1088                 writeq(val64, &bar0->tx_w_round_robin_3);
1089                 val64 = 0x0203000100000000ULL;
1090                 writeq(val64, &bar0->tx_w_round_robin_4);
1091                 break;
1092         case 5:
1093                 val64 = 0x0001000203000102ULL;
1094                 writeq(val64, &bar0->tx_w_round_robin_0);
1095                 val64 = 0x0001020001030004ULL;
1096                 writeq(val64, &bar0->tx_w_round_robin_1);
1097                 val64 = 0x0001000203000102ULL;
1098                 writeq(val64, &bar0->tx_w_round_robin_2);
1099                 val64 = 0x0001020001030004ULL;
1100                 writeq(val64, &bar0->tx_w_round_robin_3);
1101                 val64 = 0x0001000000000000ULL;
1102                 writeq(val64, &bar0->tx_w_round_robin_4);
1103                 break;
1104         case 6:
1105                 val64 = 0x0001020304000102ULL;
1106                 writeq(val64, &bar0->tx_w_round_robin_0);
1107                 val64 = 0x0304050001020001ULL;
1108                 writeq(val64, &bar0->tx_w_round_robin_1);
1109                 val64 = 0x0203000100000102ULL;
1110                 writeq(val64, &bar0->tx_w_round_robin_2);
1111                 val64 = 0x0304000102030405ULL;
1112                 writeq(val64, &bar0->tx_w_round_robin_3);
1113                 val64 = 0x0001000200000000ULL;
1114                 writeq(val64, &bar0->tx_w_round_robin_4);
1115                 break;
1116         case 7:
1117                 val64 = 0x0001020001020300ULL;
1118                 writeq(val64, &bar0->tx_w_round_robin_0);
1119                 val64 = 0x0102030400010203ULL;
1120                 writeq(val64, &bar0->tx_w_round_robin_1);
1121                 val64 = 0x0405060001020001ULL;
1122                 writeq(val64, &bar0->tx_w_round_robin_2);
1123                 val64 = 0x0304050000010200ULL;
1124                 writeq(val64, &bar0->tx_w_round_robin_3);
1125                 val64 = 0x0102030000000000ULL;
1126                 writeq(val64, &bar0->tx_w_round_robin_4);
1127                 break;
1128         case 8:
1129                 val64 = 0x0001020300040105ULL;
1130                 writeq(val64, &bar0->tx_w_round_robin_0);
1131                 val64 = 0x0200030106000204ULL;
1132                 writeq(val64, &bar0->tx_w_round_robin_1);
1133                 val64 = 0x0103000502010007ULL;
1134                 writeq(val64, &bar0->tx_w_round_robin_2);
1135                 val64 = 0x0304010002060500ULL;
1136                 writeq(val64, &bar0->tx_w_round_robin_3);
1137                 val64 = 0x0103020400000000ULL;
1138                 writeq(val64, &bar0->tx_w_round_robin_4);
1139                 break;
1140         }
1141
1142         /* Filling the Rx round robin registers as per the
1143          * number of Rings and steering based on QoS.
1144          */
1145         switch (config->rx_ring_num) {
1146         case 1:
1147                 val64 = 0x8080808080808080ULL;
1148                 writeq(val64, &bar0->rts_qos_steering);
1149                 break;
1150         case 2:
1151                 val64 = 0x0000010000010000ULL;
1152                 writeq(val64, &bar0->rx_w_round_robin_0);
1153                 val64 = 0x0100000100000100ULL;
1154                 writeq(val64, &bar0->rx_w_round_robin_1);
1155                 val64 = 0x0001000001000001ULL;
1156                 writeq(val64, &bar0->rx_w_round_robin_2);
1157                 val64 = 0x0000010000010000ULL;
1158                 writeq(val64, &bar0->rx_w_round_robin_3);
1159                 val64 = 0x0100000000000000ULL;
1160                 writeq(val64, &bar0->rx_w_round_robin_4);
1161
1162                 val64 = 0x8080808040404040ULL;
1163                 writeq(val64, &bar0->rts_qos_steering);
1164                 break;
1165         case 3:
1166                 val64 = 0x0001000102000001ULL;
1167                 writeq(val64, &bar0->rx_w_round_robin_0);
1168                 val64 = 0x0001020000010001ULL;
1169                 writeq(val64, &bar0->rx_w_round_robin_1);
1170                 val64 = 0x0200000100010200ULL;
1171                 writeq(val64, &bar0->rx_w_round_robin_2);
1172                 val64 = 0x0001000102000001ULL;
1173                 writeq(val64, &bar0->rx_w_round_robin_3);
1174                 val64 = 0x0001020000000000ULL;
1175                 writeq(val64, &bar0->rx_w_round_robin_4);
1176
1177                 val64 = 0x8080804040402020ULL;
1178                 writeq(val64, &bar0->rts_qos_steering);
1179                 break;
1180         case 4:
1181                 val64 = 0x0001020300010200ULL;
1182                 writeq(val64, &bar0->rx_w_round_robin_0);
1183                 val64 = 0x0100000102030001ULL;
1184                 writeq(val64, &bar0->rx_w_round_robin_1);
1185                 val64 = 0x0200010000010203ULL;
1186                 writeq(val64, &bar0->rx_w_round_robin_2);
1187                 val64 = 0x0001020001000001ULL;  
1188                 writeq(val64, &bar0->rx_w_round_robin_3);
1189                 val64 = 0x0203000100000000ULL;
1190                 writeq(val64, &bar0->rx_w_round_robin_4);
1191
1192                 val64 = 0x8080404020201010ULL;
1193                 writeq(val64, &bar0->rts_qos_steering);
1194                 break;
1195         case 5:
1196                 val64 = 0x0001000203000102ULL;
1197                 writeq(val64, &bar0->rx_w_round_robin_0);
1198                 val64 = 0x0001020001030004ULL;
1199                 writeq(val64, &bar0->rx_w_round_robin_1);
1200                 val64 = 0x0001000203000102ULL;
1201                 writeq(val64, &bar0->rx_w_round_robin_2);
1202                 val64 = 0x0001020001030004ULL;
1203                 writeq(val64, &bar0->rx_w_round_robin_3);
1204                 val64 = 0x0001000000000000ULL;
1205                 writeq(val64, &bar0->rx_w_round_robin_4);
1206
1207                 val64 = 0x8080404020201008ULL;
1208                 writeq(val64, &bar0->rts_qos_steering);
1209                 break;
1210         case 6:
1211                 val64 = 0x0001020304000102ULL;
1212                 writeq(val64, &bar0->rx_w_round_robin_0);
1213                 val64 = 0x0304050001020001ULL;
1214                 writeq(val64, &bar0->rx_w_round_robin_1);
1215                 val64 = 0x0203000100000102ULL;
1216                 writeq(val64, &bar0->rx_w_round_robin_2);
1217                 val64 = 0x0304000102030405ULL;
1218                 writeq(val64, &bar0->rx_w_round_robin_3);
1219                 val64 = 0x0001000200000000ULL;
1220                 writeq(val64, &bar0->rx_w_round_robin_4);
1221
1222                 val64 = 0x8080404020100804ULL;
1223                 writeq(val64, &bar0->rts_qos_steering);
1224                 break;
1225         case 7:
1226                 val64 = 0x0001020001020300ULL;
1227                 writeq(val64, &bar0->rx_w_round_robin_0);
1228                 val64 = 0x0102030400010203ULL;
1229                 writeq(val64, &bar0->rx_w_round_robin_1);
1230                 val64 = 0x0405060001020001ULL;
1231                 writeq(val64, &bar0->rx_w_round_robin_2);
1232                 val64 = 0x0304050000010200ULL;
1233                 writeq(val64, &bar0->rx_w_round_robin_3);
1234                 val64 = 0x0102030000000000ULL;
1235                 writeq(val64, &bar0->rx_w_round_robin_4);
1236
1237                 val64 = 0x8080402010080402ULL;
1238                 writeq(val64, &bar0->rts_qos_steering);
1239                 break;
1240         case 8:
1241                 val64 = 0x0001020300040105ULL;
1242                 writeq(val64, &bar0->rx_w_round_robin_0);
1243                 val64 = 0x0200030106000204ULL;
1244                 writeq(val64, &bar0->rx_w_round_robin_1);
1245                 val64 = 0x0103000502010007ULL;
1246                 writeq(val64, &bar0->rx_w_round_robin_2);
1247                 val64 = 0x0304010002060500ULL;
1248                 writeq(val64, &bar0->rx_w_round_robin_3);
1249                 val64 = 0x0103020400000000ULL;
1250                 writeq(val64, &bar0->rx_w_round_robin_4);
1251
1252                 val64 = 0x8040201008040201ULL;
1253                 writeq(val64, &bar0->rts_qos_steering);
1254                 break;
1255         }
1256
1257         /* UDP Fix */
1258         val64 = 0;
1259         for (i = 0; i < 8; i++)
1260                 writeq(val64, &bar0->rts_frm_len_n[i]);
1261
1262         /* Set the default rts frame length for the rings configured */
1263         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1264         for (i = 0 ; i < config->rx_ring_num ; i++)
1265                 writeq(val64, &bar0->rts_frm_len_n[i]);
1266
1267         /* Set the frame length for the configured rings
1268          * desired by the user
1269          */
1270         for (i = 0; i < config->rx_ring_num; i++) {
1271                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1272                  * specified frame length steering.
1273                  * If the user provides the frame length then program
1274                  * the rts_frm_len register for those values or else
1275                  * leave it as it is.
1276                  */
1277                 if (rts_frm_len[i] != 0) {
1278                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1279                                 &bar0->rts_frm_len_n[i]);
1280                 }
1281         }
1282
1283         /* Program statistics memory */
1284         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1285
1286         if (nic->device_type == XFRAME_II_DEVICE) {
1287                 val64 = STAT_BC(0x320);
1288                 writeq(val64, &bar0->stat_byte_cnt);
1289         }
1290
1291         /*
1292          * Initializing the sampling rate for the device to calculate the
1293          * bandwidth utilization.
1294          */
1295         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1296             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1297         writeq(val64, &bar0->mac_link_util);
1298
1299
1300         /*
1301          * Initializing the Transmit and Receive Traffic Interrupt
1302          * Scheme.
1303          */
1304         /*
1305          * TTI Initialization. Default Tx timer gets us about
1306          * 250 interrupts per sec. Continuous interrupts are enabled
1307          * by default.
1308          */
1309         if (nic->device_type == XFRAME_II_DEVICE) {
1310                 int count = (nic->config.bus_speed * 125)/2;
1311                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1312         } else {
1313
1314                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1315         }
1316         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1317             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1318             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1319                 if (use_continuous_tx_intrs)
1320                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1321         writeq(val64, &bar0->tti_data1_mem);
1322
1323         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1324             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1325             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1326         writeq(val64, &bar0->tti_data2_mem);
1327
1328         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1329         writeq(val64, &bar0->tti_command_mem);
1330
1331         /*
1332          * Once the operation completes, the Strobe bit of the command
1333          * register will be reset. We poll for this particular condition
1334          * We wait for a maximum of 500ms for the operation to complete,
1335          * if it's not complete by then we return error.
1336          */
1337         time = 0;
1338         while (TRUE) {
1339                 val64 = readq(&bar0->tti_command_mem);
1340                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1341                         break;
1342                 }
1343                 if (time > 10) {
1344                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1345                                   dev->name);
1346                         return -1;
1347                 }
1348                 msleep(50);
1349                 time++;
1350         }
1351
1352         if (nic->config.bimodal) {
1353                 int k = 0;
1354                 for (k = 0; k < config->rx_ring_num; k++) {
1355                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1356                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1357                         writeq(val64, &bar0->tti_command_mem);
1358
1359                 /*
1360                  * Once the operation completes, the Strobe bit of the command
1361                  * register will be reset. We poll for this particular condition
1362                  * We wait for a maximum of 500ms for the operation to complete,
1363                  * if it's not complete by then we return error.
1364                 */
1365                         time = 0;
1366                         while (TRUE) {
1367                                 val64 = readq(&bar0->tti_command_mem);
1368                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1369                                         break;
1370                                 }
1371                                 if (time > 10) {
1372                                         DBG_PRINT(ERR_DBG,
1373                                                 "%s: TTI init Failed\n",
1374                                         dev->name);
1375                                         return -1;
1376                                 }
1377                                 time++;
1378                                 msleep(50);
1379                         }
1380                 }
1381         } else {
1382
1383                 /* RTI Initialization */
1384                 if (nic->device_type == XFRAME_II_DEVICE) {
1385                         /*
1386                          * Programmed to generate Apprx 500 Intrs per
1387                          * second
1388                          */
1389                         int count = (nic->config.bus_speed * 125)/4;
1390                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1391                 } else {
1392                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1393                 }
1394                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1395                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1396                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1397
1398                 writeq(val64, &bar0->rti_data1_mem);
1399
1400                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1401                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1402                 if (nic->intr_type == MSI_X)
1403                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1404                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1405                 else
1406                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1407                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1408                 writeq(val64, &bar0->rti_data2_mem);
1409
1410                 for (i = 0; i < config->rx_ring_num; i++) {
1411                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1412                                         | RTI_CMD_MEM_OFFSET(i);
1413                         writeq(val64, &bar0->rti_command_mem);
1414
1415                         /*
1416                          * Once the operation completes, the Strobe bit of the
1417                          * command register will be reset. We poll for this
1418                          * particular condition. We wait for a maximum of 500ms
1419                          * for the operation to complete, if it's not complete
1420                          * by then we return error.
1421                          */
1422                         time = 0;
1423                         while (TRUE) {
1424                                 val64 = readq(&bar0->rti_command_mem);
1425                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1426                                         break;
1427                                 }
1428                                 if (time > 10) {
1429                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1430                                                   dev->name);
1431                                         return -1;
1432                                 }
1433                                 time++;
1434                                 msleep(50);
1435                         }
1436                 }
1437         }
1438
1439         /*
1440          * Initializing proper values as Pause threshold into all
1441          * the 8 Queues on Rx side.
1442          */
1443         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1444         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1445
1446         /* Disable RMAC PAD STRIPPING */
1447         add = &bar0->mac_cfg;
1448         val64 = readq(&bar0->mac_cfg);
1449         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1450         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1451         writel((u32) (val64), add);
1452         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1453         writel((u32) (val64 >> 32), (add + 4));
1454         val64 = readq(&bar0->mac_cfg);
1455
1456         /*
1457          * Set the time value to be inserted in the pause frame
1458          * generated by xena.
1459          */
1460         val64 = readq(&bar0->rmac_pause_cfg);
1461         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1462         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1463         writeq(val64, &bar0->rmac_pause_cfg);
1464
1465         /*
1466          * Set the Threshold Limit for Generating the pause frame
1467          * If the amount of data in any Queue exceeds ratio of
1468          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1469          * pause frame is generated
1470          */
1471         val64 = 0;
1472         for (i = 0; i < 4; i++) {
1473                 val64 |=
1474                     (((u64) 0xFF00 | nic->mac_control.
1475                       mc_pause_threshold_q0q3)
1476                      << (i * 2 * 8));
1477         }
1478         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1479
1480         val64 = 0;
1481         for (i = 0; i < 4; i++) {
1482                 val64 |=
1483                     (((u64) 0xFF00 | nic->mac_control.
1484                       mc_pause_threshold_q4q7)
1485                      << (i * 2 * 8));
1486         }
1487         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1488
1489         /*
1490          * TxDMA will stop Read request if the number of read split has
1491          * exceeded the limit pointed by shared_splits
1492          */
1493         val64 = readq(&bar0->pic_control);
1494         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1495         writeq(val64, &bar0->pic_control);
1496
1497         /*
1498          * Programming the Herc to split every write transaction
1499          * that does not start on an ADB to reduce disconnects.
1500          */
1501         if (nic->device_type == XFRAME_II_DEVICE) {
1502                 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1503                 writeq(val64, &bar0->wreq_split_mask);
1504         }
1505
1506         /* Setting Link stability period to 64 ms */ 
1507         if (nic->device_type == XFRAME_II_DEVICE) {
1508                 val64 = MISC_LINK_STABILITY_PRD(3);
1509                 writeq(val64, &bar0->misc_control);
1510         }
1511
1512         return SUCCESS;
1513 }
1514 #define LINK_UP_DOWN_INTERRUPT          1
1515 #define MAC_RMAC_ERR_TIMER              2
1516
1517 int s2io_link_fault_indication(nic_t *nic)
1518 {
1519         if (nic->intr_type != INTA)
1520                 return MAC_RMAC_ERR_TIMER;
1521         if (nic->device_type == XFRAME_II_DEVICE)
1522                 return LINK_UP_DOWN_INTERRUPT;
1523         else
1524                 return MAC_RMAC_ERR_TIMER;
1525 }
1526
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: A flag indicating whether to enable or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block.  Each block is controlled in two places:
 *  its bit(s) in the top-level general_int_mask register, and the block's
 *  own fine-grained mask register(s).
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/*  Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M | PIC_RX_INT_M;
		if (flag == ENABLE_INTRS) {
			/* Clearing a bit in general_int_mask un-masks it. */
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If this adapter reports link faults through the
			 * GPIO interrupt (Hercules/Xframe-II with INTA),
			 * un-mask only the GPIO sub-interrupt; otherwise
			 * keep all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts masked for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * TTI and RTI interrupts are left masked here; they
			 * are not enabled through this path.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  DMA Interrupts */
	/*  Enabling/Disabling Tx DMA interrupts */
	if (mask & TX_DMA_INTR) {
		/* Enable TxDMA Intrs in the general intr mask register */
		val64 = TXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Keep all interrupts other than PFC interrupt
			 * and PCC interrupt disabled in DMA level.
			 */
			val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
						      TXDMA_PCC_INT_M);
			writeq(val64, &bar0->txdma_int_mask);
			/*
			 * Enable only the MISC error 1 interrupt in PFC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
			writeq(val64, &bar0->pfc_err_mask);
			/*
			 * Enable only the FB_ECC error interrupt in PCC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
			writeq(val64, &bar0->pcc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable TxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
			writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Enabling/Disabling Rx DMA interrupts */
	if (mask & RX_DMA_INTR) {
		/*  Enable RxDMA Intrs in the general intr mask register */
		val64 = RXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All RxDMA block interrupts are disabled for now
			 * TODO
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable RxDMA Intrs in the general intr mask
			 * register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  MAC Interrupts */
	/*  Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  XGXS Interrupts */
	if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
		val64 = TXXGXS_INT_M | RXXGXS_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All XGXS block error interrupts are disabled for now
			 * TODO
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable XGXS Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Memory Controller(MC) interrupts */
	if (mask & MC_INTR) {
		val64 = MC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all MC Intrs.
			 */
			writeq(0x0, &bar0->mc_int_mask);
			writeq(0x0, &bar0->mc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}


	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1767
1768 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1769 {
1770         int ret = 0;
1771
1772         if (flag == FALSE) {
1773                 if ((!herc && (rev_id >= 4)) || herc) {
1774                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1775                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1776                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1777                                 ret = 1;
1778                         }
1779                 }else {
1780                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1781                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1782                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1783                                 ret = 1;
1784                         }
1785                 }
1786         } else {
1787                 if ((!herc && (rev_id >= 4)) || herc) {
1788                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1789                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1790                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1791                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1792                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1793                                 ret = 1;
1794                         }
1795                 } else {
1796                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1797                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1798                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1799                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1800                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1801                                 ret = 1;
1802                         }
1803                 }
1804         }
1805
1806         return ret;
1807 }
1808 /**
1809  *  verify_xena_quiescence - Checks whether the H/W is ready
1810  *  @val64 :  Value read from adapter status register.
1811  *  @flag : indicates if the adapter enable bit was ever written once
1812  *  before.
1813  *  Description: Returns whether the H/W is ready to go or not. Depending
1814  *  on whether adapter enable bit was written or not the comparison
1815  *  differs and the calling function passes the input argument flag to
1816  *  indicate this.
1817  *  Return: 1 If xena is quiescence
1818  *          0 If Xena is not quiescence
1819  */
1820
1821 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1822 {
1823         int ret = 0, herc;
1824         u64 tmp64 = ~((u64) val64);
1825         int rev_id = get_xena_rev_id(sp->pdev);
1826
1827         herc = (sp->device_type == XFRAME_II_DEVICE);
1828         if (!
1829             (tmp64 &
1830              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1831               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1832               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1833               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1834               ADAPTER_STATUS_P_PLL_LOCK))) {
1835                 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1836         }
1837
1838         return ret;
1839 }
1840
1841 /**
1842  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1843  * @sp: Pointer to device specifc structure
1844  * Description :
1845  * New procedure to clear mac address reading  problems on Alpha platforms
1846  *
1847  */
1848
1849 void fix_mac_address(nic_t * sp)
1850 {
1851         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1852         u64 val64;
1853         int i = 0;
1854
1855         while (fix_mac[i] != END_SIGN) {
1856                 writeq(fix_mac[i++], &bar0->gpio_control);
1857                 udelay(10);
1858                 val64 = readq(&bar0->gpio_control);
1859         }
1860 }
1861
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 interruptible;
	u16 subid, i;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	/* Point each receive ring controller at its first RxD block and
	 * enable it, selecting 3-buffer ring mode when 2-buffer mode is
	 * compiled in. */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
#ifndef CONFIG_2BUFF_MODE
		val64 |= PRC_CTRL_RC_ENABLED;
#else
		val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
#endif
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

#ifdef CONFIG_2BUFF_MODE
	/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
	val64 = readq(&bar0->rx_pa_cfg);
	val64 |= RX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->rx_pa_cfg);
#endif

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * NOTE(review): the historical comment here said "Enabling ECC
	 * Protection", but the code *clears* ADAPTER_ECC_EN — confirm
	 * whether disabling ECC is the intended behavior.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*  Enable select interrupts */
	/* With MSI/MSI-X, all blocks are left masked here; with INTA the
	 * traffic, PIC and MAC interrupt groups are enabled. */
	if (nic->intr_type != INTA)
		en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Don't see link state interrupts on certain switches, so
	 * directly scheduling a link state task from here.
	 */
	schedule_work(&nic->set_link_task);

	return SUCCESS;
}
1990
1991 /**
1992  *  free_tx_buffers - Free all queued Tx buffers
1993  *  @nic : device private variable.
1994  *  Description:
1995  *  Free all queued Tx buffers.
1996  *  Return Value: void
1997 */
1998
1999 static void free_tx_buffers(struct s2io_nic *nic)
2000 {
2001         struct net_device *dev = nic->dev;
2002         struct sk_buff *skb;
2003         TxD_t *txdp;
2004         int i, j;
2005         mac_info_t *mac_control;
2006         struct config_param *config;
2007         int cnt = 0, frg_cnt;
2008
2009         mac_control = &nic->mac_control;
2010         config = &nic->config;
2011
2012         for (i = 0; i < config->tx_fifo_num; i++) {
2013                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2014                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2015                             list_virt_addr;
2016                         skb =
2017                             (struct sk_buff *) ((unsigned long) txdp->
2018                                                 Host_Control);
2019                         if (skb == NULL) {
2020                                 memset(txdp, 0, sizeof(TxD_t) *
2021                                        config->max_txds);
2022                                 continue;
2023                         }
2024                         frg_cnt = skb_shinfo(skb)->nr_frags;
2025                         pci_unmap_single(nic->pdev, (dma_addr_t)
2026                                          txdp->Buffer_Pointer,
2027                                          skb->len - skb->data_len,
2028                                          PCI_DMA_TODEVICE);
2029                         if (frg_cnt) {
2030                                 TxD_t *temp;
2031                                 temp = txdp;
2032                                 txdp++;
2033                                 for (j = 0; j < frg_cnt; j++, txdp++) {
2034                                         skb_frag_t *frag =
2035                                             &skb_shinfo(skb)->frags[j];
2036                                         pci_unmap_page(nic->pdev,
2037                                                        (dma_addr_t)
2038                                                        txdp->
2039                                                        Buffer_Pointer,
2040                                                        frag->size,
2041                                                        PCI_DMA_TODEVICE);
2042                                 }
2043                                 txdp = temp;
2044                         }
2045                         dev_kfree_skb(skb);
2046                         memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2047                         cnt++;
2048                 }
2049                 DBG_PRINT(INTR_DBG,
2050                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2051                           dev->name, cnt, i);
2052                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2053                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2054         }
2055 }
2056
2057 /**
2058  *   stop_nic -  To stop the nic
2059  *   @nic ; device private variable.
2060  *   Description:
2061  *   This function does exactly the opposite of what the start_nic()
2062  *   function does. This function is called to stop the device.
2063  *   Return Value:
2064  *   void.
2065  */
2066
2067 static void stop_nic(struct s2io_nic *nic)
2068 {
2069         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2070         register u64 val64 = 0;
2071         u16 interruptible, i;
2072         mac_info_t *mac_control;
2073         struct config_param *config;
2074
2075         mac_control = &nic->mac_control;
2076         config = &nic->config;
2077
2078         /*  Disable all interrupts */
2079         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2080         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2081         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2082         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2083
2084         /*  Disable PRCs */
2085         for (i = 0; i < config->rx_ring_num; i++) {
2086                 val64 = readq(&bar0->prc_ctrl_n[i]);
2087                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2088                 writeq(val64, &bar0->prc_ctrl_n[i]);
2089         }
2090 }
2091
2092 /**
2093  *  fill_rx_buffers - Allocates the Rx side skbs
2094  *  @nic:  device private variable
2095  *  @ring_no: ring number
2096  *  Description:
2097  *  The function allocates Rx side skbs and puts the physical
2098  *  address of these buffers into the RxD buffer pointers, so that the NIC
2099  *  can DMA the received frame into these locations.
2100  *  The NIC supports 3 receive modes, viz
2101  *  1. single buffer,
2102  *  2. three buffer and
2103  *  3. Five buffer modes.
2104  *  Each mode defines how many fragments the received frame will be split
2105  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2106  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2107  *  is split into 3 fragments. As of now only single buffer mode is
2108  *  supported.
2109  *   Return Value:
2110  *  SUCCESS on success or an appropriate -ve value on failure.
2111  */
2112
int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
        struct net_device *dev = nic->dev;
        struct sk_buff *skb;
        RxD_t *rxdp;
        int off, off1, size, block_no, block_no1;
        int offset, offset1;
        u32 alloc_tab = 0;
        u32 alloc_cnt;
        mac_info_t *mac_control;
        struct config_param *config;
#ifdef CONFIG_2BUFF_MODE
        RxD_t *rxdpnext;
        int nextblk;
        u64 tmp;
        buffAdd_t *ba;
        dma_addr_t rxdpphys;
#endif
#ifndef CONFIG_S2IO_NAPI
        unsigned long flags;
#endif
        /*
         * First RxD of the current "batch".  Ownership of this descriptor
         * is handed to the NIC only after a wmb(), once the rest of the
         * batch is fully written (see bottom of the loop and 'end' label),
         * so the adapter never sees a partially initialized descriptor.
         */
        RxD_t *first_rxdp = NULL;

        mac_control = &nic->mac_control;
        config = &nic->config;
        /* How many buffers the ring is currently short of. */
        alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
            atomic_read(&nic->rx_bufs_left[ring_no]);
        /* Worst-case frame size used in single buffer mode. */
        size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
            HEADER_802_2_SIZE + HEADER_SNAP_SIZE;

        while (alloc_tab < alloc_cnt) {
                /* Snapshot producer (put) and consumer (get) positions. */
                block_no = mac_control->rings[ring_no].rx_curr_put_info.
                    block_index;
                block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
                    block_index;
                off = mac_control->rings[ring_no].rx_curr_put_info.offset;
                off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
#ifndef CONFIG_2BUFF_MODE
                /* +1: in 1-buffer mode each block ends with an extra
                 * END_OF_BLOCK marker descriptor that counts in the offset. */
                offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
                offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
#else
                offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
                offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
#endif

                rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
                    block_virt_addr + off;
                /* Producer has caught up with the consumer and this
                 * descriptor still carries an skb: the ring is full. */
                if ((offset == offset1) && (rxdp->Host_Control)) {
                        DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
                        DBG_PRINT(INTR_DBG, " info equated\n");
                        goto end;
                }
#ifndef CONFIG_2BUFF_MODE
                /* End-of-block marker: in 1-buffer mode its Control_2 holds
                 * the virtual address of the next block's first RxD. */
                if (rxdp->Control_1 == END_OF_BLOCK) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                            block_index++;
                        mac_control->rings[ring_no].rx_curr_put_info.
                            block_index %= mac_control->rings[ring_no].block_count;
                        block_no = mac_control->rings[ring_no].rx_curr_put_info.
                                block_index;
                        off++;
                        off %= (MAX_RXDS_PER_BLOCK + 1);
                        mac_control->rings[ring_no].rx_curr_put_info.offset =
                            off;
                        rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
                        DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
                                  dev->name, rxdp);
                }
#ifndef CONFIG_S2IO_NAPI
                /* Publish put_pos for rx_intr_handler, which reads it under
                 * the same lock in the non-NAPI path. */
                spin_lock_irqsave(&nic->put_lock, flags);
                mac_control->rings[ring_no].put_pos =
                    (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
                spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
#else
                /* 2-buffer mode: the marker lives in Host_Control and the
                 * next block is reached through the rx_blocks table. */
                if (rxdp->Host_Control == END_OF_BLOCK) {
                        mac_control->rings[ring_no].rx_curr_put_info.
                            block_index++;
                        mac_control->rings[ring_no].rx_curr_put_info.block_index
                            %= mac_control->rings[ring_no].block_count;
                        block_no = mac_control->rings[ring_no].rx_curr_put_info
                            .block_index;
                        off = 0;
                        DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
                                  dev->name, block_no,
                                  (unsigned long long) rxdp->Control_1);
                        mac_control->rings[ring_no].rx_curr_put_info.offset =
                            off;
                        rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
                            block_virt_addr;
                }
#ifndef CONFIG_S2IO_NAPI
                spin_lock_irqsave(&nic->put_lock, flags);
                mac_control->rings[ring_no].put_pos = (block_no *
                                         (MAX_RXDS_PER_BLOCK + 1)) + off;
                spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
#endif

                /* Descriptor still owned by the NIC (or flagged non-empty in
                 * 2-buffer mode): nothing more can be replenished. */
#ifndef CONFIG_2BUFF_MODE
                if (rxdp->Control_1 & RXD_OWN_XENA)
#else
                if (rxdp->Control_2 & BIT(0))
#endif
                {
                        mac_control->rings[ring_no].rx_curr_put_info.
                            offset = off;
                        goto end;
                }
#ifdef  CONFIG_2BUFF_MODE
                /*
                 * RxDs Spanning cache lines will be replenished only
                 * if the succeeding RxD is also owned by Host. It
                 * will always be the ((8*i)+3) and ((8*i)+6)
                 * descriptors for the 48 byte descriptor. The offending
                 * descriptor is of-course the 3rd descriptor.
                 */
                rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
                    block_dma_addr + (off * sizeof(RxD_t));
                if (((u64) (rxdpphys)) % 128 > 80) {
                        rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
                            block_virt_addr + (off + 1);
                        if (rxdpnext->Host_Control == END_OF_BLOCK) {
                                nextblk = (block_no + 1) %
                                    (mac_control->rings[ring_no].block_count);
                                rxdpnext = mac_control->rings[ring_no].rx_blocks
                                    [nextblk].block_virt_addr;
                        }
                        if (rxdpnext->Control_2 & BIT(0))
                                goto end;
                }
#endif

#ifndef CONFIG_2BUFF_MODE
                skb = dev_alloc_skb(size + NET_IP_ALIGN);
#else
                skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
#endif
                if (!skb) {
                        DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
                        DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
                        /* Don't strand a half-released batch: hand what we
                         * have to the NIC before bailing out. */
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        return -ENOMEM;
                }
#ifndef CONFIG_2BUFF_MODE
                skb_reserve(skb, NET_IP_ALIGN);
                memset(rxdp, 0, sizeof(RxD_t));
                rxdp->Buffer0_ptr = pci_map_single
                    (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
                rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
                rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
                rxdp->Host_Control = (unsigned long) (skb);
                /* Not a batch boundary: give this descriptor to the NIC
                 * immediately; boundary descriptors are deferred (below). */
                if (alloc_tab & ((1 << rxsync_frequency) - 1))
                        rxdp->Control_1 |= RXD_OWN_XENA;
                off++;
                off %= (MAX_RXDS_PER_BLOCK + 1);
                mac_control->rings[ring_no].rx_curr_put_info.offset = off;
#else
                ba = &mac_control->rings[ring_no].ba[block_no][off];
                skb_reserve(skb, BUF0_LEN);
                /* Round skb->data up to an (ALIGN_SIZE + 1)-byte boundary. */
                tmp = ((unsigned long) skb->data & ALIGN_SIZE);
                if (tmp)
                        skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);

                memset(rxdp, 0, sizeof(RxD_t));
                rxdp->Buffer2_ptr = pci_map_single
                    (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
                     PCI_DMA_FROMDEVICE);
                rxdp->Buffer0_ptr =
                    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
                                   PCI_DMA_FROMDEVICE);
                rxdp->Buffer1_ptr =
                    pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
                                   PCI_DMA_FROMDEVICE);

                rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
                rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
                rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
                rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
                rxdp->Host_Control = (u64) ((unsigned long) (skb));
                if (alloc_tab & ((1 << rxsync_frequency) - 1))
                        rxdp->Control_1 |= RXD_OWN_XENA;
                off++;
                mac_control->rings[ring_no].rx_curr_put_info.offset = off;
#endif
                rxdp->Control_2 |= SET_RXD_MARKER;

                /*
                 * Every (1 << rxsync_frequency)-th descriptor opens a new
                 * batch: release the previous batch's first descriptor
                 * (after a barrier) and remember this one as the new head.
                 */
                if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
                        first_rxdp = rxdp;
                }
                atomic_inc(&nic->rx_bufs_left[ring_no]);
                alloc_tab++;
        }

      end:
        /* Transfer ownership of first descriptor to adapter just before
         * exiting. Before that, use memory barrier so that ownership
         * and other fields are seen by adapter correctly.
         */
        if (first_rxdp) {
                wmb();
                first_rxdp->Control_1 |= RXD_OWN_XENA;
        }

        return SUCCESS;
}
2326
2327 /**
2328  *  free_rx_buffers - Frees all Rx buffers
2329  *  @sp: device private variable.
2330  *  Description:
2331  *  This function will free all Rx buffers allocated by host.
2332  *  Return Value:
2333  *  NONE.
2334  */
2335
static void free_rx_buffers(struct s2io_nic *sp)
{
        struct net_device *dev = sp->dev;
        int i, j, blk = 0, off, buf_cnt = 0;
        RxD_t *rxdp;
        struct sk_buff *skb;
        mac_info_t *mac_control;
        struct config_param *config;
#ifdef CONFIG_2BUFF_MODE
        buffAdd_t *ba;
#endif

        mac_control = &sp->mac_control;
        config = &sp->config;

        /* Walk every descriptor of every Rx ring, unmapping and freeing
         * any skb still posted, then rewind the ring bookkeeping. */
        for (i = 0; i < config->rx_ring_num; i++) {
                for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
                        off = j % (MAX_RXDS_PER_BLOCK + 1);
                        rxdp = mac_control->rings[i].rx_blocks[blk].
                                block_virt_addr + off;

#ifndef CONFIG_2BUFF_MODE
                        /* 1-buffer mode: the end-of-block marker chains to
                         * the next block via its Control_2 field. */
                        if (rxdp->Control_1 == END_OF_BLOCK) {
                                rxdp =
                                    (RxD_t *) ((unsigned long) rxdp->
                                               Control_2);
                                j++;
                                blk++;
                        }
#else
                        /* 2-buffer mode: marker in Host_Control; just step
                         * to the next block. */
                        if (rxdp->Host_Control == END_OF_BLOCK) {
                                blk++;
                                continue;
                        }
#endif

                        /* Not owned by the NIC: no skb was posted here, so
                         * just scrub the descriptor. */
                        if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
                                memset(rxdp, 0, sizeof(RxD_t));
                                continue;
                        }

                        skb =
                            (struct sk_buff *) ((unsigned long) rxdp->
                                                Host_Control);
                        if (skb) {
#ifndef CONFIG_2BUFF_MODE
                                pci_unmap_single(sp->pdev, (dma_addr_t)
                                                 rxdp->Buffer0_ptr,
                                                 dev->mtu +
                                                 HEADER_ETHERNET_II_802_3_SIZE
                                                 + HEADER_802_2_SIZE +
                                                 HEADER_SNAP_SIZE,
                                                 PCI_DMA_FROMDEVICE);
#else
                                ba = &mac_control->rings[i].ba[blk][off];
                                pci_unmap_single(sp->pdev, (dma_addr_t)
                                                 rxdp->Buffer0_ptr,
                                                 BUF0_LEN,
                                                 PCI_DMA_FROMDEVICE);
                                pci_unmap_single(sp->pdev, (dma_addr_t)
                                                 rxdp->Buffer1_ptr,
                                                 BUF1_LEN,
                                                 PCI_DMA_FROMDEVICE);
                                pci_unmap_single(sp->pdev, (dma_addr_t)
                                                 rxdp->Buffer2_ptr,
                                                 dev->mtu + BUF0_LEN + 4,
                                                 PCI_DMA_FROMDEVICE);
#endif
                                dev_kfree_skb(skb);
                                atomic_dec(&sp->rx_bufs_left[i]);
                                buf_cnt++;
                        }
                        memset(rxdp, 0, sizeof(RxD_t));
                }
                /* Ring is empty again: reset producer/consumer state. */
                mac_control->rings[i].rx_curr_put_info.block_index = 0;
                mac_control->rings[i].rx_curr_get_info.block_index = 0;
                mac_control->rings[i].rx_curr_put_info.offset = 0;
                mac_control->rings[i].rx_curr_get_info.offset = 0;
                atomic_set(&sp->rx_bufs_left[i], 0);
                /* NOTE(review): buf_cnt is never reset per ring, so the
                 * count printed here is cumulative over all rings so far. */
                DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
                          dev->name, buf_cnt, i);
        }
}
2419
2420 /**
2421  * s2io_poll - Rx interrupt handler for NAPI support
2422  * @dev : pointer to the device structure.
2423  * @budget : The number of packets that were budgeted to be processed
2424  * during  one pass through the 'Poll" function.
2425  * Description:
 * Comes into the picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context,
 * and it will process only a given number of packets.
2429  * Return value:
2430  * 0 on success and 1 if there are No Rx packets to be processed.
2431  */
2432
#if defined(CONFIG_S2IO_NAPI)
static int s2io_poll(struct net_device *dev, int *budget)
{
        nic_t *nic = dev->priv;
        int pkt_cnt = 0, org_pkts_to_process;
        mac_info_t *mac_control;
        struct config_param *config;
        XENA_dev_config_t __iomem *bar0 = nic->bar0;
        u64 val64;
        int i;

        atomic_inc(&nic->isr_cnt);
        mac_control = &nic->mac_control;
        config = &nic->config;

        /* This poll's quota is the smaller of the global *budget and the
         * per-device quota. */
        nic->pkts_to_process = *budget;
        if (nic->pkts_to_process > dev->quota)
                nic->pkts_to_process = dev->quota;
        org_pkts_to_process = nic->pkts_to_process;

        /* Acknowledge the pending Rx traffic interrupt by writing the
         * value just read back to the register. */
        val64 = readq(&bar0->rx_traffic_int);
        writeq(val64, &bar0->rx_traffic_int);

        for (i = 0; i < config->rx_ring_num; i++) {
                rx_intr_handler(&mac_control->rings[i]);
                /* Packets consumed so far = original quota - remaining. */
                pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
                if (!nic->pkts_to_process) {
                        /* Quota for the current iteration has been met */
                        goto no_rx;
                }
        }
        /* Report at least one packet of work to the NAPI core. */
        if (!pkt_cnt)
                pkt_cnt = 1;

        dev->quota -= pkt_cnt;
        *budget -= pkt_cnt;
        /* All rings drained within quota: leave polling mode. */
        netif_rx_complete(dev);

        /* Replenish the Rx rings with fresh buffers. */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
                        break;
                }
        }
        /* Re enable the Rx interrupts. */
        en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
        atomic_dec(&nic->isr_cnt);
        return 0;

no_rx:
        /* Quota exhausted: return 1 so the core keeps polling; Rx
         * interrupts stay disabled until a later pass completes. */
        dev->quota -= pkt_cnt;
        *budget -= pkt_cnt;

        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(nic, i) == -ENOMEM) {
                        DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
                        break;
                }
        }
        atomic_dec(&nic->isr_cnt);
        return 1;
}
#endif
2498
2499 /**
2500  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per-ring control structure (holds a pointer to the owning NIC).
2502  *  Description:
2503  *  If the interrupt is because of a received frame or if the
2504  *  receive ring contains fresh as yet un-processed frames,this function is
2505  *  called. It picks out the RxD at which place the last Rx processing had
2506  *  stopped and sends the skb to the OSM's Rx handler and then increments
2507  *  the offset.
2508  *  Return Value:
2509  *  NONE.
2510  */
static void rx_intr_handler(ring_info_t *ring_data)
{
        nic_t *nic = ring_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        int get_block, get_offset, put_block, put_offset, ring_bufs;
        rx_curr_get_info_t get_info, put_info;
        RxD_t *rxdp;
        struct sk_buff *skb;
#ifndef CONFIG_S2IO_NAPI
        int pkt_cnt = 0;
#endif
        spin_lock(&nic->rx_lock);
        /* Bail out early if the card is being reset / taken down. */
        if (atomic_read(&nic->card_state) == CARD_DOWN) {
                DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
                          __FUNCTION__, dev->name);
                spin_unlock(&nic->rx_lock);
                return;
        }

        /* Snapshot the consumer (get) and producer (put) positions. */
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        put_info = ring_data->rx_curr_put_info;
        put_block = put_info.block_index;
        ring_bufs = get_info.ring_len+1;
        rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
                    get_info.offset;
        get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
                get_info.offset;
#ifndef CONFIG_S2IO_NAPI
        /* Non-NAPI: put_pos is shared with fill_rx_buffers, so read it
         * under put_lock. */
        spin_lock(&nic->put_lock);
        put_offset = ring_data->put_pos;
        spin_unlock(&nic->put_lock);
#else
        put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
                put_info.offset;
#endif
        /* Consume every descriptor the NIC has handed back, stopping one
         * slot short of the producer position. */
        while (RXD_IS_UP2DT(rxdp) &&
               (((get_offset + 1) % ring_bufs) != put_offset)) {
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        spin_unlock(&nic->rx_lock);
                        return;
                }
#ifndef CONFIG_2BUFF_MODE
                pci_unmap_single(nic->pdev, (dma_addr_t)
                                 rxdp->Buffer0_ptr,
                                 dev->mtu +
                                 HEADER_ETHERNET_II_802_3_SIZE +
                                 HEADER_802_2_SIZE +
                                 HEADER_SNAP_SIZE,
                                 PCI_DMA_FROMDEVICE);
#else
                pci_unmap_single(nic->pdev, (dma_addr_t)
                                 rxdp->Buffer0_ptr,
                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
                pci_unmap_single(nic->pdev, (dma_addr_t)
                                 rxdp->Buffer1_ptr,
                                 BUF1_LEN, PCI_DMA_FROMDEVICE);
                pci_unmap_single(nic->pdev, (dma_addr_t)
                                 rxdp->Buffer2_ptr,
                                 dev->mtu + BUF0_LEN + 4,
                                 PCI_DMA_FROMDEVICE);
#endif
                /* Hand the frame up to the OS-facing Rx handler. */
                rx_osm_handler(ring_data, rxdp);
                get_info.offset++;
                ring_data->rx_curr_get_info.offset =
                    get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
                    get_info.offset;
                /* Wrap to the next block once this one is fully consumed. */
                if (get_info.offset &&
                    (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset
                            = get_info.offset;
                        get_block++;
                        get_block %= ring_data->block_count;
                        ring_data->rx_curr_get_info.block_index
                            = get_block;
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }

                get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
                            get_info.offset;
#ifdef CONFIG_S2IO_NAPI
                /* NAPI: honour the poll quota set by s2io_poll. */
                nic->pkts_to_process -= 1;
                if (!nic->pkts_to_process)
                        break;
#else
                /* Optional module-parameter cap on packets per interrupt. */
                pkt_cnt++;
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
#endif
        }
        spin_unlock(&nic->rx_lock);
}
2609
2610 /**
2611  *  tx_intr_handler - Transmit interrupt handler
2612  *  @nic : device private variable
2613  *  Description:
2614  *  If an interrupt was raised to indicate DMA complete of the
2615  *  Tx packet, this function is called. It identifies the last TxD
2616  *  whose buffer was freed and frees all skbs whose data have already
2617  *  DMA'ed into the NICs internal memory.
2618  *  Return Value:
2619  *  NONE
2620  */
2621
static void tx_intr_handler(fifo_info_t *fifo_data)
{
        nic_t *nic = fifo_data->nic;
        struct net_device *dev = (struct net_device *) nic->dev;
        tx_curr_get_info_t get_info, put_info;
        struct sk_buff *skb;
        TxD_t *txdlp;
        u16 j, frg_cnt;

        get_info = fifo_data->tx_curr_get_info;
        put_info = fifo_data->tx_curr_put_info;
        txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /* Reclaim every TxD list the NIC is done with: ownership bit
         * clear, an skb attached, and not yet at the producer position. */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        /* T_CODE 0xA: frame returned because the link went
                         * down; anything else is reported as an error. */
                        if ((err >> 48) == 0xA) {
                                DBG_PRINT(TX_DBG, "TxD returned due \
to loss of link\n");
                        }
                        else {
                                DBG_PRINT(ERR_DBG, "***TxD error \
%llx\n", err);
                        }
                }

                skb = (struct sk_buff *) ((unsigned long)
                                txdlp->Host_Control);
                if (skb == NULL) {
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        /* NOTE(review): this early return also skips the
                         * netif_wake_queue() at the bottom - confirm that
                         * is intended for this error path. */
                        return;
                }

                frg_cnt = skb_shinfo(skb)->nr_frags;
                nic->tx_pkt_count++;

                /* Unmap the linear part of the skb... */
                pci_unmap_single(nic->pdev, (dma_addr_t)
                                 txdlp->Buffer_Pointer,
                                 skb->len - skb->data_len,
                                 PCI_DMA_TODEVICE);
                /* ...and each page fragment, which occupy the subsequent
                 * TxDs of the same list. */
                if (frg_cnt) {
                        TxD_t *temp;
                        temp = txdlp;
                        txdlp++;
                        for (j = 0; j < frg_cnt; j++, txdlp++) {
                                skb_frag_t *frag =
                                    &skb_shinfo(skb)->frags[j];
                                if (!txdlp->Buffer_Pointer)
                                        break;
                                pci_unmap_page(nic->pdev,
                                               (dma_addr_t)
                                               txdlp->
                                               Buffer_Pointer,
                                               frag->size,
                                               PCI_DMA_TODEVICE);
                        }
                        txdlp = temp;
                }
                memset(txdlp, 0,
                       (sizeof(TxD_t) * fifo_data->max_txds));

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                dev_kfree_skb_irq(skb);

                /* Advance the consumer to the next TxD list (circular). */
                get_info.offset++;
                get_info.offset %= get_info.fifo_len + 1;
                txdlp = (TxD_t *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Descriptors were freed above, so the queue may accept frames
         * again. */
        spin_lock(&nic->tx_lock);
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
        spin_unlock(&nic->tx_lock);
}
2706
2707 /**
 *  alarm_intr_handler - Alarm Interrupt handler
2709  *  @nic: device private variable
2710  *  Description: If the interrupt was neither because of Rx packet or Tx
2711  *  complete, this function is called. If the interrupt was to indicate
2712  *  a loss of link, the OSM link status handler is invoked for any other
2713  *  alarm interrupt the block that raised the interrupt is displayed
2714  *  and a H/W reset is issued.
2715  *  Return Value:
2716  *  NONE
2717 */
2718
static void alarm_intr_handler(struct s2io_nic *nic)
{
        struct net_device *dev = (struct net_device *) nic->dev;
        XENA_dev_config_t __iomem *bar0 = nic->bar0;
        register u64 val64 = 0, err_reg = 0;

        /* Handling link status change error Intr */
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /* Read then write the same value back - presumably a
                 * write-1-to-clear acknowledge; confirm against the
                 * Xframe register specification. */
                err_reg = readq(&bar0->mac_rmac_err_reg);
                writeq(err_reg, &bar0->mac_rmac_err_reg);
                if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
                        /* Defer link handling to process context. */
                        schedule_work(&nic->set_link_task);
                }
        }

        /* Handling Ecc errors */
        val64 = readq(&bar0->mc_err_reg);
        writeq(val64, &bar0->mc_err_reg);
        if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
                if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
                        nic->mac_control.stats_info->sw_stat.
                                double_ecc_errs++;
                        DBG_PRINT(INIT_DBG, "%s: Device indicates ",
                                  dev->name);
                        DBG_PRINT(INIT_DBG, "double ECC error!!\n");
                        if (nic->device_type != XFRAME_II_DEVICE) {
                                /* Reset XframeI only if critical error */
                                if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
                                             MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
                                        netif_stop_queue(dev);
                                        schedule_work(&nic->rst_timer_task);
                                }
                        }
                } else {
                        /* Single-bit ECC errors: just count them. */
                        nic->mac_control.stats_info->sw_stat.
                                single_ecc_errs++;
                }
        }

        /* In case of a serious error, the device will be Reset. */
        val64 = readq(&bar0->serr_source);
        if (val64 & SERR_SOURCE_ANY) {
                DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
                DBG_PRINT(ERR_DBG, "serious error %llx!!\n", 
                          (unsigned long long)val64);
                netif_stop_queue(dev);
                schedule_work(&nic->rst_timer_task);
        }

        /*
         * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
         * Error occurs, the adapter will be recycled by disabling the
         * adapter enable bit and enabling it again after the device
         * becomes Quiescent.
         */
        val64 = readq(&bar0->pcc_err_reg);
        writeq(val64, &bar0->pcc_err_reg);
        if (val64 & PCC_FB_ECC_DB_ERR) {
                u64 ac = readq(&bar0->adapter_control);
                ac &= ~(ADAPTER_CNTL_EN);
                writeq(ac, &bar0->adapter_control);
                /* Read back - presumably to flush the posted write before
                 * scheduling re-enable; confirm. set_link_task re-enables
                 * the adapter. */
                ac = readq(&bar0->adapter_control);
                schedule_work(&nic->set_link_task);
        }

        /* Other type of interrupts are not being handled now,  TODO */
}
2786
2787 /**
2788  *  wait_for_cmd_complete - waits for a command to complete.
2789  *  @sp : private member of the device structure, which is a pointer to the
2790  *  s2io_nic structure.
2791  *  Description: Function that waits for a command to Write into RMAC
2792  *  ADDR DATA registers to be completed and returns either success or
2793  *  error depending on whether the command was complete or not.
2794  *  Return value:
2795  *   SUCCESS on success and FAILURE on failure.
2796  */
2797
2798 int wait_for_cmd_complete(nic_t * sp)
2799 {
2800         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2801         int ret = FAILURE, cnt = 0;
2802         u64 val64;
2803
2804         while (TRUE) {
2805                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2806                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2807                         ret = SUCCESS;
2808                         break;
2809                 }
2810                 msleep(50);
2811                 if (cnt++ > 10)
2812                         break;
2813         }
2814
2815         return ret;
2816 }
2817
2818 /**
2819  *  s2io_reset - Resets the card.
2820  *  @sp : private member of the device structure.
2821  *  Description: Function to Reset the card. This function then also
2822  *  restores the previously saved PCI configuration space registers as
2823  *  the card reset also resets the configuration space.
2824  *  Return value:
2825  *  void.
2826  */
2827
void s2io_reset(nic_t * sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;

	/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue a global software reset of the adapter. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);

	/*
	 * At this stage, if the PCI write is indeed completed, the
	 * card is reset and so is the PCI Config space of the device.
	 * So a read cannot be issued at this stage on any of the
	 * registers to ensure the write into "sw_reset" register
	 * has gone through.
	 * Question: Is there any system call that will explicitly force
	 * all the write commands still pending on the bus to be pushed
	 * through?
	 * As of now I am just giving a 250ms delay and hoping that the
	 * PCI write to sw_reset register is done by this time.
	 */
	msleep(250);

	/* Restore the PCI state saved during initialization. */
	pci_restore_state(sp->pdev);
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
				     pci_cmd);
	s2io_init_pci(sp);

	/* Give the card time to settle after config-space restore. */
	msleep(250);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear parity err detect bit (write-one-to-clear) */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/*
		 * Clearing PCIX Ecc status register.
		 * NOTE(review): offset 0x68 is presumably the device's
		 * PCI-X ECC status register — confirm against the Xframe II
		 * configuration-space layout.
		 */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic value/offset from the SXE-002 errata workaround. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset (read then write back to ack).
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
2905
2906 /**
 *  s2io_set_swapper - to set the swapper control on the card
2908  *  @sp : private member of the device structure,
2909  *  pointer to the s2io_nic structure.
2910  *  Description: Function to set the swapper control on the card
2911  *  correctly depending on the 'endianness' of the system.
2912  *  Return value:
2913  *  SUCCESS on success and FAILURE on failure.
2914  */
2915
2916 int s2io_set_swapper(nic_t * sp)
2917 {
2918         struct net_device *dev = sp->dev;
2919         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2920         u64 val64, valt, valr;
2921
2922         /*
2923          * Set proper endian settings and verify the same by reading
2924          * the PIF Feed-back register.
2925          */
2926
2927         val64 = readq(&bar0->pif_rd_swapper_fb);
2928         if (val64 != 0x0123456789ABCDEFULL) {
2929                 int i = 0;
2930                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2931                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2932                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2933                                 0};                     /* FE=0, SE=0 */
2934
2935                 while(i<4) {
2936                         writeq(value[i], &bar0->swapper_ctrl);
2937                         val64 = readq(&bar0->pif_rd_swapper_fb);
2938                         if (val64 == 0x0123456789ABCDEFULL)
2939                                 break;
2940                         i++;
2941                 }
2942                 if (i == 4) {
2943                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2944                                 dev->name);
2945                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2946                                 (unsigned long long) val64);
2947                         return FAILURE;
2948                 }
2949                 valr = value[i];
2950         } else {
2951                 valr = readq(&bar0->swapper_ctrl);
2952         }
2953
2954         valt = 0x0123456789ABCDEFULL;
2955         writeq(valt, &bar0->xmsi_address);
2956         val64 = readq(&bar0->xmsi_address);
2957
2958         if(val64 != valt) {
2959                 int i = 0;
2960                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2961                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2962                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2963                                 0};                     /* FE=0, SE=0 */
2964
2965                 while(i<4) {
2966                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2967                         writeq(valt, &bar0->xmsi_address);
2968                         val64 = readq(&bar0->xmsi_address);
2969                         if(val64 == valt)
2970                                 break;
2971                         i++;
2972                 }
2973                 if(i == 4) {
2974                         unsigned long long x = val64;
2975                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2976                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2977                         return FAILURE;
2978                 }
2979         }
2980         val64 = readq(&bar0->swapper_ctrl);
2981         val64 &= 0xFFFF000000000000ULL;
2982
2983 #ifdef  __BIG_ENDIAN
2984         /*
2985          * The device by default set to a big endian format, so a
2986          * big endian driver need not set anything.
2987          */
2988         val64 |= (SWAPPER_CTRL_TXP_FE |
2989                  SWAPPER_CTRL_TXP_SE |
2990                  SWAPPER_CTRL_TXD_R_FE |
2991                  SWAPPER_CTRL_TXD_W_FE |
2992                  SWAPPER_CTRL_TXF_R_FE |
2993                  SWAPPER_CTRL_RXD_R_FE |
2994                  SWAPPER_CTRL_RXD_W_FE |
2995                  SWAPPER_CTRL_RXF_W_FE |
2996                  SWAPPER_CTRL_XMSI_FE |
2997                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2998         if (nic->intr_type == INTA)
2999                 val64 |= SWAPPER_CTRL_XMSI_SE;
3000         writeq(val64, &bar0->swapper_ctrl);
3001 #else
3002         /*
3003          * Initially we enable all bits to make it accessible by the
3004          * driver, then we selectively enable only those bits that
3005          * we want to set.
3006          */
3007         val64 |= (SWAPPER_CTRL_TXP_FE |
3008                  SWAPPER_CTRL_TXP_SE |
3009                  SWAPPER_CTRL_TXD_R_FE |
3010                  SWAPPER_CTRL_TXD_R_SE |
3011                  SWAPPER_CTRL_TXD_W_FE |
3012                  SWAPPER_CTRL_TXD_W_SE |
3013                  SWAPPER_CTRL_TXF_R_FE |
3014                  SWAPPER_CTRL_RXD_R_FE |
3015                  SWAPPER_CTRL_RXD_R_SE |
3016                  SWAPPER_CTRL_RXD_W_FE |
3017                  SWAPPER_CTRL_RXD_W_SE |
3018                  SWAPPER_CTRL_RXF_W_FE |
3019                  SWAPPER_CTRL_XMSI_FE |
3020                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3021         if (sp->intr_type == INTA)
3022                 val64 |= SWAPPER_CTRL_XMSI_SE;
3023         writeq(val64, &bar0->swapper_ctrl);
3024 #endif
3025         val64 = readq(&bar0->swapper_ctrl);
3026
3027         /*
3028          * Verifying if endian settings are accurate by reading a
3029          * feedback register.
3030          */
3031         val64 = readq(&bar0->pif_rd_swapper_fb);
3032         if (val64 != 0x0123456789ABCDEFULL) {
3033                 /* Endian settings are incorrect, calls for another dekko. */
3034                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3035                           dev->name);
3036                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3037                           (unsigned long long) val64);
3038                 return FAILURE;
3039         }
3040
3041         return SUCCESS;
3042 }
3043
3044 int wait_for_msix_trans(nic_t *nic, int i)
3045 {
3046         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3047         u64 val64;
3048         int ret = 0, cnt = 0;
3049
3050         do {
3051                 val64 = readq(&bar0->xmsi_access);
3052                 if (!(val64 & BIT(15)))
3053                         break;
3054                 mdelay(1);
3055                 cnt++;
3056         } while(cnt < 5);
3057         if (cnt == 5) {
3058                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3059                 ret = 1;
3060         }
3061
3062         return ret;
3063 }
3064
3065 void restore_xmsi_data(nic_t *nic)
3066 {
3067         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3068         u64 val64;
3069         int i;
3070
3071         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3072                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3073                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3074                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3075                 writeq(val64, &bar0->xmsi_access);
3076                 if (wait_for_msix_trans(nic, i)) {
3077                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3078                         continue;
3079                 }
3080         }
3081 }
3082
3083 void store_xmsi_data(nic_t *nic)
3084 {
3085         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3086         u64 val64, addr, data;
3087         int i;
3088
3089         /* Store and display */
3090         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3091                 val64 = (BIT(15) | vBIT(i, 26, 6));
3092                 writeq(val64, &bar0->xmsi_access);
3093                 if (wait_for_msix_trans(nic, i)) {
3094                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3095                         continue;
3096                 }
3097                 addr = readq(&bar0->xmsi_address);
3098                 data = readq(&bar0->xmsi_data);
3099                 if (addr && data) {
3100                         nic->msix_info[i].addr = addr;
3101                         nic->msix_info[i].data = data;
3102                 }
3103         }
3104 }
3105
/*
 * s2io_enable_msi - switch the adapter to MSI interrupt delivery.
 * Enables MSI on the PCI device, flips the device to use MSI message 1,
 * and routes all configured Tx FIFOs and Rx rings to that message.
 * Returns 0 on success or the error from pci_enable_msi().
 */
int s2io_enable_msi(nic_t *nic)
{
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	u16 msi_ctrl, msg_val;
	struct config_param *config = &nic->config;
	struct net_device *dev = nic->dev;
	u64 val64, tx_mat, rx_mat;
	int i, err;

	/* Clear bit 1 of pic_control (device-specific interrupt mode bit). */
	val64 = readq(&bar0->pic_control);
	val64 &= ~BIT(1);
	writeq(val64, &bar0->pic_control);

	err = pci_enable_msi(nic->pdev);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
			  nic->dev->name);
		return err;
	}

	/*
	 * Enable MSI and use MSI-1 in stead of the standard MSI-0
	 * for interrupt handling.
	 * NOTE(review): offsets 0x4c and 0x42 are hard-coded config-space
	 * locations (presumably the MSI message-data and MSI-control
	 * registers of this device's capability) — confirm against the
	 * adapter's PCI capability layout.
	 */
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
	msg_val ^= 0x1;
	pci_write_config_word(nic->pdev, 0x4c, msg_val);
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);

	pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
	msi_ctrl |= 0x10;
	pci_write_config_word(nic->pdev, 0x42, msi_ctrl);

	/* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i=0; i<config->tx_fifo_num; i++) {
		tx_mat |= TX_MAT_SET(i, 1);
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	rx_mat = readq(&bar0->rx_mat);
	for (i=0; i<config->rx_ring_num; i++) {
		rx_mat |= RX_MAT_SET(i, 1);
	}
	writeq(rx_mat, &bar0->rx_mat);

	/* Publish the (possibly re-routed) MSI vector to the net_device. */
	dev->irq = nic->pdev->irq;
	return 0;
}
3155
3156 int s2io_enable_msi_x(nic_t *nic)
3157 {
3158         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3159         u64 tx_mat, rx_mat;
3160         u16 msi_control; /* Temp variable */
3161         int ret, i, j, msix_indx = 1;
3162
3163         nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3164                                GFP_KERNEL);
3165         if (nic->entries == NULL) {
3166                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3167                 return -ENOMEM;
3168         }
3169         memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3170
3171         nic->s2io_entries =
3172                 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3173                                    GFP_KERNEL);
3174         if (nic->s2io_entries == NULL) {
3175                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3176                 kfree(nic->entries);
3177                 return -ENOMEM;
3178         }
3179         memset(nic->s2io_entries, 0,
3180                MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3181
3182         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3183                 nic->entries[i].entry = i;
3184                 nic->s2io_entries[i].entry = i;
3185                 nic->s2io_entries[i].arg = NULL;
3186                 nic->s2io_entries[i].in_use = 0;
3187         }
3188
3189         tx_mat = readq(&bar0->tx_mat0_n[0]);
3190         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3191                 tx_mat |= TX_MAT_SET(i, msix_indx);
3192                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3193                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3194                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3195         }
3196         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3197
3198         if (!nic->config.bimodal) {
3199                 rx_mat = readq(&bar0->rx_mat);
3200                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3201                         rx_mat |= RX_MAT_SET(j, msix_indx);
3202                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3203                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3204                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3205                 }
3206                 writeq(rx_mat, &bar0->rx_mat);
3207         } else {
3208                 tx_mat = readq(&bar0->tx_mat0_n[7]);
3209                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3210                         tx_mat |= TX_MAT_SET(i, msix_indx);
3211                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3212                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3213                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3214                 }
3215                 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3216         }
3217
3218         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3219         if (ret) {
3220                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3221                 kfree(nic->entries);
3222                 kfree(nic->s2io_entries);
3223                 nic->entries = NULL;
3224                 nic->s2io_entries = NULL;
3225                 return -ENOMEM;
3226         }
3227
3228         /*
3229          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3230          * in the herc NIC. (Temp change, needs to be removed later)
3231          */
3232         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3233         msi_control |= 0x1; /* Enable MSI */
3234         pci_write_config_word(nic->pdev, 0x42, msi_control);
3235
3236         return 0;
3237 }
3238
3239 /* ********************************************************* *
3240  * Functions defined below concern the OS part of the driver *
3241  * ********************************************************* */
3242
3243 /**
3244  *  s2io_open - open entry point of the driver
3245  *  @dev : pointer to the device structure.
3246  *  Description:
3247  *  This function is the open entry point of the driver. It mainly calls a
3248  *  function to allocate Rx buffers and inserts them into the buffer
3249  *  descriptors and then enables the Rx part of the NIC.
3250  *  Return value:
3251  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3252  *   file on failure.
3253  */
3254
int s2io_open(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	int err = 0;
	int i;
	u16 msi_control; /* Temp variable */

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		err = -ENODEV;
		goto hw_init_failed;
	}

	/*
	 * Store the values of the MSIX table in the nic_t structure,
	 * so s2io_reset() can restore them after a card reset.
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI) {
		err = request_irq((int) sp->pdev->irq, s2io_msi_handle, 
			SA_SHIRQ, sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: MSI registration \
failed\n", dev->name);
			goto isr_registration_failed;
		}
	}
	if (sp->intr_type == MSI_X) {
		/*
		 * Register one handler per data vector.  Vectors start at
		 * index 1 (in_use == MSIX_FLG was set by s2io_enable_msi_x);
		 * the loop stops at the first entry not flagged in-use.
		 * NOTE(review): vector 0 is never registered here — confirm
		 * where (or whether) the alarm vector gets its handler.
		 */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc1, "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc1,
					  sp->s2io_entries[i].arg);
				DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1, 
							sp->msix_info[i].addr);
			} else {
				sprintf(sp->desc2, "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc2,
					  sp->s2io_entries[i].arg);
				DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2, 
							sp->msix_info[i].addr);
			}
			if (err) {
				DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				goto isr_registration_failed;
			}
			/* Mark registered so the error path knows to free it. */
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			goto isr_registration_failed;
		}
	}

	if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		err = -ENODEV;
		goto setting_mac_address_failed;
	}

	netif_start_queue(dev);
	return 0;

	/* Error unwind: each label undoes everything after the one below it. */
setting_mac_address_failed:
	if (sp->intr_type != MSI_X)
		free_irq(sp->pdev->irq, dev);
isr_registration_failed:
	del_timer_sync(&sp->alarm_timer);
	if (sp->intr_type == MSI_X) {
		/*
		 * NOTE(review): MSI-X vectors are only freed for XFRAME_II
		 * devices here — if MSI-X can be active on Xframe I, the
		 * registered vectors would leak on this path.  Confirm.
		 */
		if (sp->device_type == XFRAME_II_DEVICE) {
			for (i=1; (sp->s2io_entries[i].in_use == 
				MSIX_REGISTERED_SUCCESS); i++) {
				int vector = sp->entries[i].vector;
				void *arg = sp->s2io_entries[i].arg;

				free_irq(vector, arg);
			}
			pci_disable_msix(sp->pdev);

			/* Temp */
			pci_read_config_word(sp->pdev, 0x42, &msi_control);
			msi_control &= 0xFFFE; /* Disable MSI */
			pci_write_config_word(sp->pdev, 0x42, msi_control);
		}
	}
	else if (sp->intr_type == MSI)
		pci_disable_msi(sp->pdev);
	s2io_reset(sp);
hw_init_failed:
	if (sp->intr_type == MSI_X) {
		if (sp->entries)
			kfree(sp->entries);
		if (sp->s2io_entries)
			kfree(sp->s2io_entries);
	}
	return err;
}
3371
3372 /**
3373  *  s2io_close -close entry point of the driver
3374  *  @dev : device pointer.
3375  *  Description:
3376  *  This is the stop entry point of the driver. It needs to undo exactly
3377  *  whatever was done by the open entry point,thus it's usually referred to
3378  *  as the close function.Among other things this function mainly stops the
3379  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3380  *  Return value:
3381  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3382  *  file on failure.
3383  */
3384
int s2io_close(struct net_device *dev)
{
	nic_t *sp = dev->priv;
	int i;
	u16 msi_control;

	/* Let any pending deferred work (link/reset tasks) finish first. */
	flush_scheduled_work();
	netif_stop_queue(dev);
	/* Reset card, kill tasklet and free Tx and Rx buffers. */
	s2io_card_down(sp);

	if (sp->intr_type == MSI_X) {
		/*
		 * Free every vector that s2io_open() marked
		 * MSIX_REGISTERED_SUCCESS (data vectors start at index 1).
		 * NOTE(review): as in s2io_open(), this cleanup only runs
		 * for XFRAME_II devices — confirm MSI-X cannot be active
		 * on other device types, else vectors leak here.
		 */
		if (sp->device_type == XFRAME_II_DEVICE) {
			for (i=1; (sp->s2io_entries[i].in_use == 
					MSIX_REGISTERED_SUCCESS); i++) {
				int vector = sp->entries[i].vector;
				void *arg = sp->s2io_entries[i].arg;

				free_irq(vector, arg);
			}
			/* Undo the "MSI also enabled" herc workaround. */
			pci_read_config_word(sp->pdev, 0x42, &msi_control);
			msi_control &= 0xFFFE; /* Disable MSI */
			pci_write_config_word(sp->pdev, 0x42, msi_control);

			pci_disable_msix(sp->pdev);
		}
	}
	else {
		/* INTA or MSI: single shared vector registered against dev. */
		free_irq(sp->pdev->irq, dev);
		if (sp->intr_type == MSI)
			pci_disable_msi(sp->pdev);
	}	
	sp->device_close_flag = TRUE;	/* Device is shut down. */
	return 0;
}
3420
3421 /**
 *  s2io_xmit - Tx entry point of the driver
3423  *  @skb : the socket buffer containing the Tx data.
3424  *  @dev : device pointer.
3425  *  Description :
3426  *  This function is the Tx entry point of the driver. S2IO NIC supports
3427  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start variable
 *  will not be updated.
3430  *  Return value:
3431  *  0 on success & 1 on failure.
3432  */
3433
int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	nic_t *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	TxD_t *txdp;
	TxFIFO_element_t __iomem *tx_fifo;
	unsigned long flags;
#ifdef NETIF_F_TSO
	int mss;
#endif
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
	/* Descriptor ring state is protected by tx_lock for the whole path. */
	spin_lock_irqsave(&sp->tx_lock, flags);
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		/* Card is being reset: silently drop the packet. */
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;

	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* 802.1p priority lives in the top 3 bits of the VLAN tag. */
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* A buffer with no data will be dropped */
	if (!skb->len) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

#ifdef NETIF_F_TSO
	/* Enable LSO (TCP segmentation offload) when the stack set an MSS. */
	mss = skb_shinfo(skb)->tso_size;
	if (mss) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
	}
#endif

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* Length of the linear (non-paged) part of the skb. */
	frg_len = skb->len - skb->data_len;

	/* First TxD carries the linear data; stash the skb for completion. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	txdp->Host_Control = (unsigned long) skb;
	if (skb->ip_summed == CHECKSUM_HW) {
		/* Stack wants HW checksum: enable IPv4/TCP/UDP CKO offload. */
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}

	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
			    TXD_GATHER_CODE_FIRST);
	/* Hand ownership of this descriptor to the NIC. */
	txdp->Control_1 |= TXD_LIST_OWN_XENA;

	/* For fragmented SKB: one additional TxD per page fragment. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
	}
	/* Mark the last TxD of the gather list. */
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	/* Kick the FIFO: write the descriptor-list address, then control. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);

#ifdef NETIF_F_TSO
	if (mss)
		val64 |= TX_FIFO_SPECIAL_FUNC;
#endif
	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes above before the lock is released. */
	mmiowb();

	/* Advance the put pointer (ring wrap via modulo). */
	put_off++;
	put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off + 1) % queue_len) == get_off) {
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}
3572
3573 static void
3574 s2io_alarm_handle(unsigned long data)
3575 {
3576         nic_t *sp = (nic_t *)data;
3577
3578         alarm_intr_handler(sp);
3579         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3580 }
3581
/*
 * MSI interrupt handler: a single vector covers all Rx rings and Tx
 * FIFOs, so every ring/FIFO is serviced on each interrupt, then Rx
 * buffers are replenished as needed.  Always returns IRQ_HANDLED.
 */
static irqreturn_t
s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	nic_t *sp = dev->priv;
	int i;
	int ret;
	mac_info_t *mac_control;
	struct config_param *config;

	/* isr_cnt lets other paths know an ISR is in flight. */
	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;
	DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);

	/* If Intr is because of Rx Traffic */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* If Intr is because of Tx Traffic */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
		int level = rx_buffer_level(sp, rxb_size, i);

		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s:Out of memory",
					  dev->name);
				DBG_PRINT(ERR_DBG, " in ISR!!\n");
				/* Release the tasklet "busy" bit before bailing. */
				clear_bit(0, (&sp->tasklet_status));
				atomic_dec(&sp->isr_cnt);
				return IRQ_HANDLED;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW) {
			/* Low-water mark: defer refill to the tasklet. */
			tasklet_schedule(&sp->task);
		}
	}

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
3634
3635 static irqreturn_t
3636 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3637 {
3638         ring_info_t *ring = (ring_info_t *)dev_id;
3639         nic_t *sp = ring->nic;
3640         int rxb_size, level, rng_n;
3641
3642         atomic_inc(&sp->isr_cnt);
3643         rx_intr_handler(ring);
3644
3645         rng_n = ring->ring_no;
3646         rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3647         level = rx_buffer_level(sp, rxb_size, rng_n);
3648
3649         if ((level == PANIC) && (!TASKLET_IN_USE)) {
3650                 int ret;
3651                 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3652                 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3653                 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3654                         DBG_PRINT(ERR_DBG, "Out of memory in %s",
3655                                   __FUNCTION__);
3656                         clear_bit(0, (&sp->tasklet_status));
3657                         return IRQ_HANDLED;
3658                 }
3659                 clear_bit(0, (&sp->tasklet_status));
3660         } else if (level == LOW) {
3661                 tasklet_schedule(&sp->task);
3662         }
3663         atomic_dec(&sp->isr_cnt);
3664
3665         return IRQ_HANDLED;
3666 }
3667
3668 static irqreturn_t
3669 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
3670 {
3671         fifo_info_t *fifo = (fifo_info_t *)dev_id;
3672         nic_t *sp = fifo->nic;
3673
3674         atomic_inc(&sp->isr_cnt);
3675         tx_intr_handler(fifo);
3676         atomic_dec(&sp->isr_cnt);
3677         return IRQ_HANDLED;
3678 }
3679
/*
 * s2io_txpic_intr_handle - service TxPIC (GPIO link-state) interrupts.
 * @sp: device private structure.
 *
 * Acknowledges GPIO link up/down interrupt causes, invokes
 * s2io_set_link() on a genuine state transition, and re-programs the
 * GPIO interrupt mask so that only the transition *away* from the
 * current link state remains unmasked.
 */
static void s2io_txpic_intr_handle(nic_t *sp)
{
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64;

        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                /* NOTE(review): both UP and DOWN latched together is
                 * treated as transient — just ack both cause bits (write
                 * back with the bits set) and skip to re-masking. */
                if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
                    (val64 & GPIO_INT_REG_LINK_UP)) {
                        val64 |=  GPIO_INT_REG_LINK_DOWN;
                        val64 |= GPIO_INT_REG_LINK_UP;
                        writeq(val64, &bar0->gpio_int_reg);
                        goto masking;
                }

                /* A real transition: the latched cause disagrees with the
                 * last recorded link state. */
                if (((sp->last_link_state == LINK_UP) &&
                        (val64 & GPIO_INT_REG_LINK_DOWN)) ||
                ((sp->last_link_state == LINK_DOWN) &&
                (val64 & GPIO_INT_REG_LINK_UP))) {
                        /* mask both link interrupts while the state
                         * change is being processed */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 |=  GPIO_INT_MASK_LINK_DOWN;
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);
                        s2io_set_link((unsigned long)sp);
                }
masking:
                if (sp->last_link_state == LINK_UP) {
                        /*enable down interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        /* unmasks link down intr */
                        val64 &=  ~GPIO_INT_MASK_LINK_DOWN;
                        /* masks link up intr */
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);
                } else {
                        /*enable UP Interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        /* unmasks link up interrupt */
                        val64 &= ~GPIO_INT_MASK_LINK_UP;
                        /* masks link down interrupt */
                        val64 |=  GPIO_INT_MASK_LINK_DOWN;
                        writeq(val64, &bar0->gpio_int_mask);
                }
        }
}
3726
/**
 *  s2io_isr - ISR handler of the device .
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  @regs: pointer to the registers pushed on the stack.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        nic_t *sp = dev->priv;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        int i;
        u64 reason = 0, val64;
        mac_info_t *mac_control;
        struct config_param *config;

        /* isr_cnt tracks in-flight ISR invocations; decremented on
         * every exit path below. */
        atomic_inc(&sp->isr_cnt);
        mac_control = &sp->mac_control;
        config = &sp->config;

        /*
         * Identify the cause for interrupt and call the appropriate
         * interrupt handler. Causes for the interrupt could be;
         * 1. Rx of packet.
         * 2. Tx complete.
         * 3. Link down.
         * 4. Error in any functional blocks of the NIC.
         */
        reason = readq(&bar0->general_int_status);

        if (!reason) {
                /* The interrupt was not raised by Xena. */
                atomic_dec(&sp->isr_cnt);
                return IRQ_NONE;
        }

#ifdef CONFIG_S2IO_NAPI
        /* NAPI build: disable Rx interrupts and let the poll routine do
         * the actual receive processing. */
        if (reason & GEN_INTR_RXTRAFFIC) {
                if (netif_rx_schedule_prep(dev)) {
                        en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
                                              DISABLE_INTRS);
                        __netif_rx_schedule(dev);
                }
        }
#else
        /* If Intr is because of Rx Traffic */
        if (reason & GEN_INTR_RXTRAFFIC) {
                /*
                 * rx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                val64 = 0xFFFFFFFFFFFFFFFFULL;
                writeq(val64, &bar0->rx_traffic_int);
                for (i = 0; i < config->rx_ring_num; i++) {
                        rx_intr_handler(&mac_control->rings[i]);
                }
        }
#endif

        /* If Intr is because of Tx Traffic */
        if (reason & GEN_INTR_TXTRAFFIC) {
                /*
                 * tx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                val64 = 0xFFFFFFFFFFFFFFFFULL;
                writeq(val64, &bar0->tx_traffic_int);

                for (i = 0; i < config->tx_fifo_num; i++)
                        tx_intr_handler(&mac_control->fifos[i]);
        }

        if (reason & GEN_INTR_TXPIC)
                s2io_txpic_intr_handle(sp);
        /*
         * If the Rx buffer count is below the panic threshold then
         * reallocate the buffers from the interrupt handler itself,
         * else schedule a tasklet to reallocate the buffers.
         */
#ifndef CONFIG_S2IO_NAPI
        for (i = 0; i < config->rx_ring_num; i++) {
                int ret;
                int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
                int level = rx_buffer_level(sp, rxb_size, i);

                /* NOTE(review): TASKLET_IN_USE appears to test-and-set
                 * bit 0 of tasklet_status (released by the clear_bit()
                 * calls below) — confirm against the macro definition. */
                if ((level == PANIC) && (!TASKLET_IN_USE)) {
                        DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
                        DBG_PRINT(INTR_DBG, "PANIC levels\n");
                        if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
                                DBG_PRINT(ERR_DBG, "%s:Out of memory",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, " in ISR!!\n");
                                clear_bit(0, (&sp->tasklet_status));
                                atomic_dec(&sp->isr_cnt);
                                return IRQ_HANDLED;
                        }
                        clear_bit(0, (&sp->tasklet_status));
                } else if (level == LOW) {
                        tasklet_schedule(&sp->task);
                }
        }
#endif

        atomic_dec(&sp->isr_cnt);
        return IRQ_HANDLED;
}
3843
3844 /**
3845  * s2io_updt_stats -
3846  */
3847 static void s2io_updt_stats(nic_t *sp)
3848 {
3849         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3850         u64 val64;
3851         int cnt = 0;
3852
3853         if (atomic_read(&sp->card_state) == CARD_UP) {
3854                 /* Apprx 30us on a 133 MHz bus */
3855                 val64 = SET_UPDT_CLICKS(10) |
3856                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3857                 writeq(val64, &bar0->stat_cfg);
3858                 do {
3859                         udelay(100);
3860                         val64 = readq(&bar0->stat_cfg);
3861                         if (!(val64 & BIT(0)))
3862                                 break;
3863                         cnt++;
3864                         if (cnt == 5)
3865                                 break; /* Updt failed */
3866                 } while(1);
3867         }
3868 }
3869
3870 /**
3871  *  s2io_get_stats - Updates the device statistics structure.
3872  *  @dev : pointer to the device structure.
3873  *  Description:
3874  *  This function updates the device statistics structure in the s2io_nic
3875  *  structure and returns a pointer to the same.
3876  *  Return value:
3877  *  pointer to the updated net_device_stats structure.
3878  */
3879
3880 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3881 {
3882         nic_t *sp = dev->priv;
3883         mac_info_t *mac_control;
3884         struct config_param *config;
3885
3886
3887         mac_control = &sp->mac_control;
3888         config = &sp->config;
3889
3890         /* Configure Stats for immediate updt */
3891         s2io_updt_stats(sp);
3892
3893         sp->stats.tx_packets =
3894                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3895         sp->stats.tx_errors =
3896                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3897         sp->stats.rx_errors =
3898                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3899         sp->stats.multicast =
3900                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3901         sp->stats.rx_length_errors =
3902                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3903
3904         return (&sp->stats);
3905 }
3906
3907 /**
3908  *  s2io_set_multicast - entry point for multicast address enable/disable.
3909  *  @dev : pointer to the device structure
3910  *  Description:
3911  *  This function is a driver entry point which gets called by the kernel
3912  *  whenever multicast addresses must be enabled/disabled. This also gets
3913  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
3914  *  determine, if multicast address must be enabled or if promiscuous mode
3915  *  is to be disabled etc.
3916  *  Return value:
3917  *  void.
3918  */
3919
3920 static void s2io_set_multicast(struct net_device *dev)
3921 {
3922         int i, j, prev_cnt;
3923         struct dev_mc_list *mclist;
3924         nic_t *sp = dev->priv;
3925         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3926         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3927             0xfeffffffffffULL;
3928         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3929         void __iomem *add;
3930
3931         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3932                 /*  Enable all Multicast addresses */
3933                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3934                        &bar0->rmac_addr_data0_mem);
3935                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3936                        &bar0->rmac_addr_data1_mem);
3937                 val64 = RMAC_ADDR_CMD_MEM_WE |
3938                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3939                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3940                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3941                 /* Wait till command completes */
3942                 wait_for_cmd_complete(sp);
3943
3944                 sp->m_cast_flg = 1;
3945                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3946         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3947                 /*  Disable all Multicast addresses */
3948                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3949                        &bar0->rmac_addr_data0_mem);
3950                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3951                        &bar0->rmac_addr_data1_mem);
3952                 val64 = RMAC_ADDR_CMD_MEM_WE |
3953                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3954                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3955                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3956                 /* Wait till command completes */
3957                 wait_for_cmd_complete(sp);
3958
3959                 sp->m_cast_flg = 0;
3960                 sp->all_multi_pos = 0;
3961         }
3962
3963         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3964                 /*  Put the NIC into promiscuous mode */
3965                 add = &bar0->mac_cfg;
3966                 val64 = readq(&bar0->mac_cfg);
3967                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3968
3969                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3970                 writel((u32) val64, add);
3971                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3972                 writel((u32) (val64 >> 32), (add + 4));
3973
3974                 val64 = readq(&bar0->mac_cfg);
3975                 sp->promisc_flg = 1;
3976                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
3977                           dev->name);
3978         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3979                 /*  Remove the NIC from promiscuous mode */
3980                 add = &bar0->mac_cfg;
3981                 val64 = readq(&bar0->mac_cfg);
3982                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3983
3984                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3985                 writel((u32) val64, add);
3986                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3987                 writel((u32) (val64 >> 32), (add + 4));
3988
3989                 val64 = readq(&bar0->mac_cfg);
3990                 sp->promisc_flg = 0;
3991                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
3992                           dev->name);
3993         }
3994
3995         /*  Update individual M_CAST address list */
3996         if ((!sp->m_cast_flg) && dev->mc_count) {
3997                 if (dev->mc_count >
3998                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3999                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4000                                   dev->name);
4001                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
4002                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4003                         return;
4004                 }
4005
4006                 prev_cnt = sp->mc_addr_count;
4007                 sp->mc_addr_count = dev->mc_count;
4008
4009                 /* Clear out the previous list of Mc in the H/W. */
4010                 for (i = 0; i < prev_cnt; i++) {
4011                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4012                                &bar0->rmac_addr_data0_mem);
4013                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4014                                 &bar0->rmac_addr_data1_mem);
4015                         val64 = RMAC_ADDR_CMD_MEM_WE |
4016                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4017                             RMAC_ADDR_CMD_MEM_OFFSET
4018                             (MAC_MC_ADDR_START_OFFSET + i);
4019                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4020
4021                         /* Wait for command completes */
4022                         if (wait_for_cmd_complete(sp)) {
4023                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4024                                           dev->name);
4025                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4026                                 return;
4027                         }
4028                 }
4029
4030                 /* Create the new Rx filter list and update the same in H/W. */
4031                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4032                      i++, mclist = mclist->next) {
4033                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4034                                ETH_ALEN);
4035                         for (j = 0; j < ETH_ALEN; j++) {
4036                                 mac_addr |= mclist->dmi_addr[j];
4037                                 mac_addr <<= 8;
4038                         }
4039                         mac_addr >>= 8;
4040                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4041                                &bar0->rmac_addr_data0_mem);
4042                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4043                                 &bar0->rmac_addr_data1_mem);
4044                         val64 = RMAC_ADDR_CMD_MEM_WE |
4045                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4046                             RMAC_ADDR_CMD_MEM_OFFSET
4047                             (i + MAC_MC_ADDR_START_OFFSET);
4048                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4049
4050                         /* Wait for command completes */
4051                         if (wait_for_cmd_complete(sp)) {
4052                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4053                                           dev->name);
4054                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4055                                 return;
4056                         }
4057                 }
4058         }
4059 }
4060
4061 /**
4062  *  s2io_set_mac_addr - Programs the Xframe mac address
4063  *  @dev : pointer to the device structure.
4064  *  @addr: a uchar pointer to the new mac address which is to be set.
4065  *  Description : This procedure will program the Xframe to receive
4066  *  frames with new Mac Address
4067  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4068  *  as defined in errno.h file on failure.
4069  */
4070
4071 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4072 {
4073         nic_t *sp = dev->priv;
4074         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4075         register u64 val64, mac_addr = 0;
4076         int i;
4077
4078         /*
4079          * Set the new MAC address as the new unicast filter and reflect this
4080          * change on the device address registered with the OS. It will be
4081          * at offset 0.
4082          */
4083         for (i = 0; i < ETH_ALEN; i++) {
4084                 mac_addr <<= 8;
4085                 mac_addr |= addr[i];
4086         }
4087
4088         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4089                &bar0->rmac_addr_data0_mem);
4090
4091         val64 =
4092             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4093             RMAC_ADDR_CMD_MEM_OFFSET(0);
4094         writeq(val64, &bar0->rmac_addr_cmd_mem);
4095         /* Wait till command completes */
4096         if (wait_for_cmd_complete(sp)) {
4097                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4098                 return FAILURE;
4099         }
4100
4101         return SUCCESS;
4102 }
4103
4104 /**
4105  * s2io_ethtool_sset - Sets different link parameters.
4106  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4107  * @info: pointer to the structure with parameters given by ethtool to set
4108  * link information.
4109  * Description:
4110  * The function sets different link parameters provided by the user onto
4111  * the NIC.
4112  * Return value:
4113  * 0 on success.
4114 */
4115
4116 static int s2io_ethtool_sset(struct net_device *dev,
4117                              struct ethtool_cmd *info)
4118 {
4119         nic_t *sp = dev->priv;
4120         if ((info->autoneg == AUTONEG_ENABLE) ||
4121             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4122                 return -EINVAL;
4123         else {
4124                 s2io_close(sp->dev);
4125                 s2io_open(sp->dev);
4126         }
4127
4128         return 0;
4129 }
4130
4131 /**
4132  * s2io_ethtol_gset - Return link specific information.
4133  * @sp : private member of the device structure, pointer to the
4134  *      s2io_nic structure.
4135  * @info : pointer to the structure with parameters given by ethtool
4136  * to return link information.
4137  * Description:
4138  * Returns link specific information like speed, duplex etc.. to ethtool.
4139  * Return value :
4140  * return 0 on success.
4141  */
4142
4143 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4144 {
4145         nic_t *sp = dev->priv;
4146         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4147         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4148         info->port = PORT_FIBRE;
4149         /* info->transceiver?? TODO */
4150
4151         if (netif_carrier_ok(sp->dev)) {
4152                 info->speed = 10000;
4153                 info->duplex = DUPLEX_FULL;
4154         } else {
4155                 info->speed = -1;
4156                 info->duplex = -1;
4157         }
4158
4159         info->autoneg = AUTONEG_DISABLE;
4160         return 0;
4161 }
4162
4163 /**
4164  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4165  * @sp : private member of the device structure, which is a pointer to the
4166  * s2io_nic structure.
4167  * @info : pointer to the structure with parameters given by ethtool to
4168  * return driver information.
4169  * Description:
4170  * Returns driver specefic information like name, version etc.. to ethtool.
4171  * Return value:
4172  *  void
4173  */
4174
4175 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4176                                   struct ethtool_drvinfo *info)
4177 {
4178         nic_t *sp = dev->priv;
4179
4180         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
4181         strncpy(info->version, s2io_driver_version,
4182                 sizeof(s2io_driver_version));
4183         strncpy(info->fw_version, "", 32);
4184         strncpy(info->bus_info, pci_name(sp->pdev), 32);
4185         info->regdump_len = XENA_REG_SPACE;
4186         info->eedump_len = XENA_EEPROM_SPACE;
4187         info->testinfo_len = S2IO_TEST_LEN;
4188         info->n_stats = S2IO_STAT_LEN;
4189 }
4190
4191 /**
4192  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4193  *  @sp: private member of the device structure, which is a pointer to the
4194  *  s2io_nic structure.
4195  *  @regs : pointer to the structure with parameters given by ethtool for
4196  *  dumping the registers.
4197  *  @reg_space: The input argumnet into which all the registers are dumped.
4198  *  Description:
4199  *  Dumps the entire register space of xFrame NIC into the user given
4200  *  buffer area.
4201  * Return value :
4202  * void .
4203 */
4204
4205 static void s2io_ethtool_gregs(struct net_device *dev,
4206                                struct ethtool_regs *regs, void *space)
4207 {
4208         int i;
4209         u64 reg;
4210         u8 *reg_space = (u8 *) space;
4211         nic_t *sp = dev->priv;
4212
4213         regs->len = XENA_REG_SPACE;
4214         regs->version = sp->pdev->subsystem_device;
4215
4216         for (i = 0; i < regs->len; i += 8) {
4217                 reg = readq(sp->bar0 + i);
4218                 memcpy((reg_space + i), &reg, 8);
4219         }
4220 }
4221
4222 /**
4223  *  s2io_phy_id  - timer function that alternates adapter LED.
4224  *  @data : address of the private member of the device structure, which
4225  *  is a pointer to the s2io_nic structure, provided as an u32.
4226  * Description: This is actually the timer function that alternates the
4227  * adapter LED bit of the adapter control bit to set/reset every time on
4228  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4229  *  once every second.
4230 */
4231 static void s2io_phy_id(unsigned long data)
4232 {
4233         nic_t *sp = (nic_t *) data;
4234         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4235         u64 val64 = 0;
4236         u16 subid;
4237
4238         subid = sp->pdev->subsystem_device;
4239         if ((sp->device_type == XFRAME_II_DEVICE) ||
4240                    ((subid & 0xFF) >= 0x07)) {
4241                 val64 = readq(&bar0->gpio_control);
4242                 val64 ^= GPIO_CTRL_GPIO_0;
4243                 writeq(val64, &bar0->gpio_control);
4244         } else {
4245                 val64 = readq(&bar0->adapter_control);
4246                 val64 ^= ADAPTER_LED_ON;
4247                 writeq(val64, &bar0->adapter_control);
4248         }
4249
4250         mod_timer(&sp->id_timer, jiffies + HZ / 2);
4251 }
4252
4253 /**
4254  * s2io_ethtool_idnic - To physically identify the nic on the system.
4255  * @sp : private member of the device structure, which is a pointer to the
4256  * s2io_nic structure.
4257  * @id : pointer to the structure with identification parameters given by
4258  * ethtool.
4259  * Description: Used to physically identify the NIC on the system.
4260  * The Link LED will blink for a time specified by the user for
4261  * identification.
4262  * NOTE: The Link has to be Up to be able to blink the LED. Hence
4263  * identification is possible only if it's link is up.
4264  * Return value:
4265  * int , returns 0 on success
4266  */
4267
4268 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4269 {
4270         u64 val64 = 0, last_gpio_ctrl_val;
4271         nic_t *sp = dev->priv;
4272         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4273         u16 subid;
4274
4275         subid = sp->pdev->subsystem_device;
4276         last_gpio_ctrl_val = readq(&bar0->gpio_control);
4277         if ((sp->device_type == XFRAME_I_DEVICE) &&
4278                 ((subid & 0xFF) < 0x07)) {
4279                 val64 = readq(&bar0->adapter_control);
4280                 if (!(val64 & ADAPTER_CNTL_EN)) {
4281                         printk(KERN_ERR
4282                                "Adapter Link down, cannot blink LED\n");
4283                         return -EFAULT;
4284                 }
4285         }
4286         if (sp->id_timer.function == NULL) {
4287                 init_timer(&sp->id_timer);
4288                 sp->id_timer.function = s2io_phy_id;
4289                 sp->id_timer.data = (unsigned long) sp;
4290         }
4291         mod_timer(&sp->id_timer, jiffies);
4292         if (data)
4293                 msleep_interruptible(data * HZ);
4294         else
4295                 msleep_interruptible(MAX_FLICKER_TIME);
4296         del_timer_sync(&sp->id_timer);
4297
4298         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4299                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4300                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4301         }
4302
4303         return 0;
4304 }
4305
4306 /**
4307  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4308  * @sp : private member of the device structure, which is a pointer to the
4309  *      s2io_nic structure.
4310  * @ep : pointer to the structure with pause parameters given by ethtool.
4311  * Description:
4312  * Returns the Pause frame generation and reception capability of the NIC.
4313  * Return value:
4314  *  void
4315  */
4316 static void s2io_ethtool_getpause_data(struct net_device *dev,
4317                                        struct ethtool_pauseparam *ep)
4318 {
4319         u64 val64;
4320         nic_t *sp = dev->priv;
4321         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4322
4323         val64 = readq(&bar0->rmac_pause_cfg);
4324         if (val64 & RMAC_PAUSE_GEN_ENABLE)
4325                 ep->tx_pause = TRUE;
4326         if (val64 & RMAC_PAUSE_RX_ENABLE)
4327                 ep->rx_pause = TRUE;
4328         ep->autoneg = FALSE;
4329 }
4330
4331 /**
4332  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4333  * @sp : private member of the device structure, which is a pointer to the
4334  *      s2io_nic structure.
4335  * @ep : pointer to the structure with pause parameters given by ethtool.
4336  * Description:
4337  * It can be used to set or reset Pause frame generation or reception
4338  * support of the NIC.
4339  * Return value:
4340  * int, returns 0 on Success
4341  */
4342
4343 static int s2io_ethtool_setpause_data(struct net_device *dev,
4344                                struct ethtool_pauseparam *ep)
4345 {
4346         u64 val64;
4347         nic_t *sp = dev->priv;
4348         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4349
4350         val64 = readq(&bar0->rmac_pause_cfg);
4351         if (ep->tx_pause)
4352                 val64 |= RMAC_PAUSE_GEN_ENABLE;
4353         else
4354                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4355         if (ep->rx_pause)
4356                 val64 |= RMAC_PAUSE_RX_ENABLE;
4357         else
4358                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4359         writeq(val64, &bar0->rmac_pause_cfg);
4360         return 0;
4361 }
4362
4363 /**
4364  * read_eeprom - reads 4 bytes of data from user given offset.
4365  * @sp : private member of the device structure, which is a pointer to the
4366  *      s2io_nic structure.
4367  * @off : offset at which the data must be written
4368  * @data : Its an output parameter where the data read at the given
4369  *      offset is stored.
4370  * Description:
4371  * Will read 4 bytes of data from the user given offset and return the
4372  * read data.
4373  * NOTE: Will allow to read only part of the EEPROM visible through the
4374  *   I2C bus.
4375  * Return value:
4376  *  -1 on failure and 0 on success.
4377  */
4378
4379 #define S2IO_DEV_ID             5
4380 static int read_eeprom(nic_t * sp, int off, u32 * data)
4381 {
4382         int ret = -1;
4383         u32 exit_cnt = 0;
4384         u64 val64;
4385         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4386
4387         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4388             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4389             I2C_CONTROL_CNTL_START;
4390         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4391
4392         while (exit_cnt < 5) {
4393                 val64 = readq(&bar0->i2c_control);
4394                 if (I2C_CONTROL_CNTL_END(val64)) {
4395                         *data = I2C_CONTROL_GET_DATA(val64);
4396                         ret = 0;
4397                         break;
4398                 }
4399                 msleep(50);
4400                 exit_cnt++;
4401         }
4402
4403         return ret;
4404 }
4405
4406 /**
4407  *  write_eeprom - actually writes the relevant part of the data value.
4408  *  @sp : private member of the device structure, which is a pointer to the
4409  *       s2io_nic structure.
4410  *  @off : offset at which the data must be written
4411  *  @data : The data that is to be written
4412  *  @cnt : Number of bytes of the data that are actually to be written into
4413  *  the Eeprom. (max of 3)
4414  * Description:
4415  *  Actually writes the relevant part of the data value into the Eeprom
4416  *  through the I2C bus.
4417  * Return value:
4418  *  0 on success, -1 on failure.
4419  */
4420
4421 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
4422 {
4423         int exit_cnt = 0, ret = -1;
4424         u64 val64;
4425         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4426
4427         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4428             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
4429             I2C_CONTROL_CNTL_START;
4430         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4431
4432         while (exit_cnt < 5) {
4433                 val64 = readq(&bar0->i2c_control);
4434                 if (I2C_CONTROL_CNTL_END(val64)) {
4435                         if (!(val64 & I2C_CONTROL_NACK))
4436                                 ret = 0;
4437                         break;
4438                 }
4439                 msleep(50);
4440                 exit_cnt++;
4441         }
4442
4443         return ret;
4444 }
4445
4446 /**
4447  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
4448  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4449  *  @eeprom : pointer to the user level structure provided by ethtool,
4450  *  containing all relevant information.
4451  *  @data_buf : user defined value to be written into Eeprom.
4452  *  Description: Reads the values stored in the Eeprom at given offset
4453  *  for a given length. Stores these values in the input argument data
4454  *  buffer 'data_buf' and returns these to the caller (ethtool.)
4455  *  Return value:
4456  *  int  0 on success
4457  */
4458
4459 static int s2io_ethtool_geeprom(struct net_device *dev,
4460                          struct ethtool_eeprom *eeprom, u8 * data_buf)
4461 {
4462         u32 data, i, valid;
4463         nic_t *sp = dev->priv;
4464
4465         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4466
4467         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4468                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4469
4470         for (i = 0; i < eeprom->len; i += 4) {
4471                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4472                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4473                         return -EFAULT;
4474                 }
4475                 valid = INV(data);
4476                 memcpy((data_buf + i), &valid, 4);
4477         }
4478         return 0;
4479 }
4480
4481 /**
4482  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4483  *  @sp : private member of the device structure, which is a pointer to the
4484  *  s2io_nic structure.
4485  *  @eeprom : pointer to the user level structure provided by ethtool,
4486  *  containing all relevant information.
4487  *  @data_buf : user defined value to be written into Eeprom.
4488  *  Description:
4489  *  Tries to write the user provided value in the Eeprom, at the offset
4490  *  given by the user.
4491  *  Return value:
4492  *  0 on success, -EFAULT on failure.
4493  */
4494
4495 static int s2io_ethtool_seeprom(struct net_device *dev,
4496                                 struct ethtool_eeprom *eeprom,
4497                                 u8 * data_buf)
4498 {
4499         int len = eeprom->len, cnt = 0;
4500         u32 valid = 0, data;
4501         nic_t *sp = dev->priv;
4502
4503         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4504                 DBG_PRINT(ERR_DBG,
4505                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4506                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
4507                           eeprom->magic);
4508                 return -EFAULT;
4509         }
4510
4511         while (len) {
4512                 data = (u32) data_buf[cnt] & 0x000000FF;
4513                 if (data) {
4514                         valid = (u32) (data << 24);
4515                 } else
4516                         valid = data;
4517
4518                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4519                         DBG_PRINT(ERR_DBG,
4520                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4521                         DBG_PRINT(ERR_DBG,
4522                                   "write into the specified offset\n");
4523                         return -EFAULT;
4524                 }
4525                 cnt++;
4526                 len--;
4527         }
4528
4529         return 0;
4530 }
4531
4532 /**
4533  * s2io_register_test - reads and writes into all clock domains.
4534  * @sp : private member of the device structure, which is a pointer to the
4535  * s2io_nic structure.
4536  * @data : variable that returns the result of each of the tests conducted
4537  * by the driver.
4538  * Description:
4539  * Read and write into all clock domains. The NIC has 3 clock domains,
4540  * see that registers in all the three regions are accessible.
4541  * Return value:
4542  * 0 on success.
4543  */
4544
4545 static int s2io_register_test(nic_t * sp, uint64_t * data)
4546 {
4547         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4548         u64 val64 = 0;
4549         int fail = 0;
4550
4551         val64 = readq(&bar0->pif_rd_swapper_fb);
4552         if (val64 != 0x123456789abcdefULL) {
4553                 fail = 1;
4554                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4555         }
4556
4557         val64 = readq(&bar0->rmac_pause_cfg);
4558         if (val64 != 0xc000ffff00000000ULL) {
4559                 fail = 1;
4560                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4561         }
4562
4563         val64 = readq(&bar0->rx_queue_cfg);
4564         if (val64 != 0x0808080808080808ULL) {
4565                 fail = 1;
4566                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4567         }
4568
4569         val64 = readq(&bar0->xgxs_efifo_cfg);
4570         if (val64 != 0x000000001923141EULL) {
4571                 fail = 1;
4572                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4573         }
4574
4575         val64 = 0x5A5A5A5A5A5A5A5AULL;
4576         writeq(val64, &bar0->xmsi_data);
4577         val64 = readq(&bar0->xmsi_data);
4578         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4579                 fail = 1;
4580                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4581         }
4582
4583         val64 = 0xA5A5A5A5A5A5A5A5ULL;
4584         writeq(val64, &bar0->xmsi_data);
4585         val64 = readq(&bar0->xmsi_data);
4586         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4587                 fail = 1;
4588                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4589         }
4590
4591         *data = fail;
4592         return 0;
4593 }
4594
4595 /**
4596  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4597  * @sp : private member of the device structure, which is a pointer to the
4598  * s2io_nic structure.
4599  * @data:variable that returns the result of each of the test conducted by
4600  * the driver.
4601  * Description:
4602  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4603  * register.
4604  * Return value:
4605  * 0 on success.
4606  */
4607
4608 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4609 {
4610         int fail = 0;
4611         u32 ret_data;
4612
4613         /* Test Write Error at offset 0 */
4614         if (!write_eeprom(sp, 0, 0, 3))
4615                 fail = 1;
4616
4617         /* Test Write at offset 4f0 */
4618         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4619                 fail = 1;
4620         if (read_eeprom(sp, 0x4F0, &ret_data))
4621                 fail = 1;
4622
4623         if (ret_data != 0x01234567)
4624                 fail = 1;
4625
4626         /* Reset the EEPROM data go FFFF */
4627         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4628
4629         /* Test Write Request Error at offset 0x7c */
4630         if (!write_eeprom(sp, 0x07C, 0, 3))
4631                 fail = 1;
4632
4633         /* Test Write Request at offset 0x7fc */
4634         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4635                 fail = 1;
4636         if (read_eeprom(sp, 0x7FC, &ret_data))
4637                 fail = 1;
4638
4639         if (ret_data != 0x01234567)
4640                 fail = 1;
4641
4642         /* Reset the EEPROM data go FFFF */
4643         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4644
4645         /* Test Write Error at offset 0x80 */
4646         if (!write_eeprom(sp, 0x080, 0, 3))
4647                 fail = 1;
4648
4649         /* Test Write Error at offset 0xfc */
4650         if (!write_eeprom(sp, 0x0FC, 0, 3))
4651                 fail = 1;
4652
4653         /* Test Write Error at offset 0x100 */
4654         if (!write_eeprom(sp, 0x100, 0, 3))
4655                 fail = 1;
4656
4657         /* Test Write Error at offset 4ec */
4658         if (!write_eeprom(sp, 0x4EC, 0, 3))
4659                 fail = 1;
4660
4661         *data = fail;
4662         return 0;
4663 }
4664
4665 /**
4666  * s2io_bist_test - invokes the MemBist test of the card .
4667  * @sp : private member of the device structure, which is a pointer to the
4668  * s2io_nic structure.
4669  * @data:variable that returns the result of each of the test conducted by
4670  * the driver.
4671  * Description:
4672  * This invokes the MemBist test of the card. We give around
4673  * 2 secs time for the Test to complete. If it's still not complete
4674  * within this period, we consider that the test failed.
4675  * Return value:
4676  * 0 on success and -1 on failure.
4677  */
4678
4679 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4680 {
4681         u8 bist = 0;
4682         int cnt = 0, ret = -1;
4683
4684         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4685         bist |= PCI_BIST_START;
4686         pci_write_config_word(sp->pdev, PCI_BIST, bist);
4687
4688         while (cnt < 20) {
4689                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4690                 if (!(bist & PCI_BIST_START)) {
4691                         *data = (bist & PCI_BIST_CODE_MASK);
4692                         ret = 0;
4693                         break;
4694                 }
4695                 msleep(100);
4696                 cnt++;
4697         }
4698
4699         return ret;
4700 }
4701
4702 /**
4703  * s2io_link_test - verifies the link state of the nic
4704  * @sp : private member of the device structure, which is a pointer to the
4705  * s2io_nic structure.
4706  * @data: variable that returns the result of each of the test conducted by
4707  * the driver.
4708  * Description:
4709  * The function verifies the link state of the NIC and updates the input
4710  * argument 'data' appropriately.
4711  * Return value:
4712  * 0 on success.
4713  */
4714
4715 static int s2io_link_test(nic_t * sp, uint64_t * data)
4716 {
4717         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4718         u64 val64;
4719
4720         val64 = readq(&bar0->adapter_status);
4721         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4722                 *data = 1;
4723
4724         return 0;
4725 }
4726
4727 /**
4728  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4729  * @sp - private member of the device structure, which is a pointer to the
4730  * s2io_nic structure.
4731  * @data - variable that returns the result of each of the test
4732  * conducted by the driver.
4733  * Description:
4734  *  This is one of the offline test that tests the read and write
4735  *  access to the RldRam chip on the NIC.
4736  * Return value:
4737  *  0 on success.
4738  */
4739
static int s2io_rldram_test(nic_t * sp, uint64_t * data)
{
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        u64 val64;
        int cnt, iteration = 0, test_pass = 0;

        /* Disable ECC so the test patterns are not silently corrected. */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /* Put the memory controller's RLDRAM interface into test mode. */
        val64 = readq(&bar0->mc_rldram_test_ctrl);
        val64 |= MC_RLDRAM_TEST_MODE;
        writeq(val64, &bar0->mc_rldram_test_ctrl);

        /* Program the mode register set; SPECIAL_REG_WRITE with UF is
         * required for these registers (ordering matters here).
         */
        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        val64 |= MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        /* Two iterations: the second inverts the upper 48 bits of each
         * data pattern to toggle every cell both ways.
         */
        while (iteration < 2) {
                val64 = 0x55555555aaaa0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d0);

                val64 = 0xaaaa5a5555550000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d1);

                val64 = 0x55aaaaaaaa5a0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d2);

                /* Test address range. */
                val64 = (u64) (0x0000003fffff0000ULL);
                writeq(val64, &bar0->mc_rldram_test_add);


                /* Phase 1: write the patterns into RLDRAM. */
                val64 = MC_RLDRAM_TEST_MODE;
                writeq(val64, &bar0->mc_rldram_test_ctrl);

                val64 |=
                    MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
                    MC_RLDRAM_TEST_GO;
                writeq(val64, &bar0->mc_rldram_test_ctrl);

                /* Poll for write completion: up to 5 x 200ms. */
                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(200);
                }

                /* Timed out: bail with test_pass still 0 (failure). */
                if (cnt == 5)
                        break;

                /* Phase 2: read back and compare in hardware. */
                val64 = MC_RLDRAM_TEST_MODE;
                writeq(val64, &bar0->mc_rldram_test_ctrl);

                val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
                writeq(val64, &bar0->mc_rldram_test_ctrl);

                /* Poll for read/compare completion: up to 5 x 500ms. */
                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(500);
                }

                if (cnt == 5)
                        break;

                /* NOTE(review): test_pass reflects only the last
                 * completed iteration; a pass here overwrites any
                 * earlier result.
                 */
                val64 = readq(&bar0->mc_rldram_test_ctrl);
                if (val64 & MC_RLDRAM_TEST_PASS)
                        test_pass = 1;

                iteration++;
        }

        /* *data: 0 == pass, 1 == fail. */
        if (!test_pass)
                *data = 1;
        else
                *data = 0;

        return 0;
}
4832
4833 /**
4834  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
4835  *  @sp : private member of the device structure, which is a pointer to the
4836  *  s2io_nic structure.
4837  *  @ethtest : pointer to a ethtool command specific structure that will be
4838  *  returned to the user.
4839  *  @data : variable that returns the result of each of the test
4840  * conducted by the driver.
4841  * Description:
4842  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
4843  *  the health of the card.
4844  * Return value:
4845  *  void
4846  */
4847
4848 static void s2io_ethtool_test(struct net_device *dev,
4849                               struct ethtool_test *ethtest,
4850                               uint64_t * data)
4851 {
4852         nic_t *sp = dev->priv;
4853         int orig_state = netif_running(sp->dev);
4854
4855         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4856                 /* Offline Tests. */
4857                 if (orig_state)
4858                         s2io_close(sp->dev);
4859
4860                 if (s2io_register_test(sp, &data[0]))
4861                         ethtest->flags |= ETH_TEST_FL_FAILED;
4862
4863                 s2io_reset(sp);
4864
4865                 if (s2io_rldram_test(sp, &data[3]))
4866                         ethtest->flags |= ETH_TEST_FL_FAILED;
4867
4868                 s2io_reset(sp);
4869
4870                 if (s2io_eeprom_test(sp, &data[1]))
4871                         ethtest->flags |= ETH_TEST_FL_FAILED;
4872
4873                 if (s2io_bist_test(sp, &data[4]))
4874                         ethtest->flags |= ETH_TEST_FL_FAILED;
4875
4876                 if (orig_state)
4877                         s2io_open(sp->dev);
4878
4879                 data[2] = 0;
4880         } else {
4881                 /* Online Tests. */
4882                 if (!orig_state) {
4883                         DBG_PRINT(ERR_DBG,
4884                                   "%s: is not up, cannot run test\n",
4885                                   dev->name);
4886                         data[0] = -1;
4887                         data[1] = -1;
4888                         data[2] = -1;
4889                         data[3] = -1;
4890                         data[4] = -1;
4891                 }
4892
4893                 if (s2io_link_test(sp, &data[2]))
4894                         ethtest->flags |= ETH_TEST_FL_FAILED;
4895
4896                 data[0] = 0;
4897                 data[1] = 0;
4898                 data[3] = 0;
4899                 data[4] = 0;
4900         }
4901 }
4902
static void s2io_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats,
                                   u64 * tmp_stats)
{
        int i = 0;
        nic_t *sp = dev->priv;
        StatInfo_t *stat_info = sp->mac_control.stats_info;

        /* Refresh the DMA'd statistics block before copying it out. */
        s2io_updt_stats(sp);

        /* NOTE(review): the fill order below must stay in lockstep with
         * ethtool_stats_keys; reorder both together or not at all.
         * Many counters are split by the hardware into a 32-bit
         * overflow half and a 32-bit low half, combined here into a
         * single u64.
         */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
                le32_to_cpu(stat_info->tmac_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_bcst_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_any_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_vld_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_icmp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_rst_tcp);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_bcst_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_discarded_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_usized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_osized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_frag_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_jabber_frms);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ip);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_drop_ip);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_icmp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_err_drp_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_pause_cnt);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_accepted_ip);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
        /* Placeholder slot keeps alignment with the key table. */
        tmp_stats[i++] = 0;
        /* Software-maintained ECC error counters (not from hardware). */
        tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
}
5002
5003 int s2io_ethtool_get_regs_len(struct net_device *dev)
5004 {
5005         return (XENA_REG_SPACE);
5006 }
5007
5008
5009 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5010 {
5011         nic_t *sp = dev->priv;
5012
5013         return (sp->rx_csum);
5014 }
5015 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5016 {
5017         nic_t *sp = dev->priv;
5018
5019         if (data)
5020                 sp->rx_csum = 1;
5021         else
5022                 sp->rx_csum = 0;
5023
5024         return 0;
5025 }
5026 int s2io_get_eeprom_len(struct net_device *dev)
5027 {
5028         return (XENA_EEPROM_SPACE);
5029 }
5030
5031 int s2io_ethtool_self_test_count(struct net_device *dev)
5032 {
5033         return (S2IO_TEST_LEN);
5034 }
5035 void s2io_ethtool_get_strings(struct net_device *dev,
5036                               u32 stringset, u8 * data)
5037 {
5038         switch (stringset) {
5039         case ETH_SS_TEST:
5040                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5041                 break;
5042         case ETH_SS_STATS:
5043                 memcpy(data, &ethtool_stats_keys,
5044                        sizeof(ethtool_stats_keys));
5045         }
5046 }
5047 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5048 {
5049         return (S2IO_STAT_LEN);
5050 }
5051
5052 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5053 {
5054         if (data)
5055                 dev->features |= NETIF_F_IP_CSUM;
5056         else
5057                 dev->features &= ~NETIF_F_IP_CSUM;
5058
5059         return 0;
5060 }
5061
5062
/* ethtool entry points for this driver; generic ethtool_op_* helpers
 * are used where no hardware-specific handling is needed.
 */
static struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
        /* TSO ops only exist on kernels that define NETIF_F_TSO. */
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
#endif
        .self_test_count = s2io_ethtool_self_test_count,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_stats_count = s2io_ethtool_get_stats_count,
        .get_ethtool_stats = s2io_get_ethtool_stats
};
5092
5093 /**
5094  *  s2io_ioctl - Entry point for the Ioctl
5095  *  @dev :  Device pointer.
5096  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
5097  *  a proprietary structure used to pass information to the driver.
5098  *  @cmd :  This is used to distinguish between the different commands that
5099  *  can be passed to the IOCTL functions.
5100  *  Description:
5101  *  Currently no special functionality is supported in IOCTL, hence the
5102  *  function always returns -EOPNOTSUPP
5103  */
5104
5105 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5106 {
5107         return -EOPNOTSUPP;
5108 }
5109
5110 /**
5111  *  s2io_change_mtu - entry point to change MTU size for the device.
5112  *   @dev : device pointer.
5113  *   @new_mtu : the new MTU size for the device.
5114  *   Description: A driver entry point to change MTU size for the device.
5115  *   Before changing the MTU the device must be stopped.
5116  *  Return value:
5117  *   0 on success and an appropriate (-)ve integer as defined in errno.h
5118  *   file on failure.
5119  */
5120
5121 int s2io_change_mtu(struct net_device *dev, int new_mtu)
5122 {
5123         nic_t *sp = dev->priv;
5124
5125         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5126                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5127                           dev->name);
5128                 return -EPERM;
5129         }
5130
5131         dev->mtu = new_mtu;
5132         if (netif_running(dev)) {
5133                 s2io_card_down(sp);
5134                 netif_stop_queue(dev);
5135                 if (s2io_card_up(sp)) {
5136                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5137                                   __FUNCTION__);
5138                 }
5139                 if (netif_queue_stopped(dev))
5140                         netif_wake_queue(dev);
5141         } else { /* Device is down */
5142                 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5143                 u64 val64 = new_mtu;
5144
5145                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5146         }
5147
5148         return 0;
5149 }
5150
5151 /**
5152  *  s2io_tasklet - Bottom half of the ISR.
5153  *  @dev_adr : address of the device structure in dma_addr_t format.
5154  *  Description:
5155  *  This is the tasklet or the bottom half of the ISR. This is
5156  *  an extension of the ISR which is scheduled by the scheduler to be run
5157  *  when the load on the CPU is low. All low priority tasks of the ISR can
5158  *  be pushed into the tasklet. For now the tasklet is used only to
5159  *  replenish the Rx buffers in the Rx buffer descriptors.
5160  *  Return value:
5161  *  void.
5162  */
5163
static void s2io_tasklet(unsigned long dev_addr)
{
        struct net_device *dev = (struct net_device *) dev_addr;
        nic_t *sp = dev->priv;
        int i, ret;
        mac_info_t *mac_control;
        struct config_param *config;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /* NOTE(review): TASKLET_IN_USE presumably test-and-sets bit 0
         * of sp->tasklet_status (the clear_bit below pairs with it) --
         * confirm against the macro definition.
         */
        if (!TASKLET_IN_USE) {
                /* Replenish Rx buffers on every ring; stop at the first
                 * ring that is out of memory or already full.
                 */
                for (i = 0; i < config->rx_ring_num; i++) {
                        ret = fill_rx_buffers(sp, i);
                        if (ret == -ENOMEM) {
                                DBG_PRINT(ERR_DBG, "%s: Out of ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "memory in tasklet\n");
                                break;
                        } else if (ret == -EFILL) {
                                DBG_PRINT(ERR_DBG,
                                          "%s: Rx Ring %d is full\n",
                                          dev->name, i);
                                break;
                        }
                }
                /* Release the "in use" flag taken above. */
                clear_bit(0, (&sp->tasklet_status));
        }
}
5193
5194 /**
5195  * s2io_set_link - Set the LInk status
5196  * @data: long pointer to device private structue
5197  * Description: Sets the link status for the adapter
5198  */
5199
static void s2io_set_link(unsigned long data)
{
        nic_t *nic = (nic_t *) data;
        struct net_device *dev = nic->dev;
        XENA_dev_config_t __iomem *bar0 = nic->bar0;
        register u64 val64;
        u16 subid;

        /* Bit 0 of link_state doubles as a mutual-exclusion flag with
         * s2io_card_down(); if it is already set a reset is in flight. */
        if (test_and_set_bit(0, &(nic->link_state))) {
                /* The card is being reset, no point doing anything */
                return;
        }

        subid = nic->pdev->subsystem_device;
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /*
                 * Allow a small delay for the NICs self initiated
                 * cleanup to complete.
                 */
                msleep(100);
        }

        val64 = readq(&bar0->adapter_status);
        if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
                if (LINK_IS_UP(val64)) {
                        /* Link is physically up: enable the adapter and
                         * drive the link LED, then report LINK_UP. */
                        val64 = readq(&bar0->adapter_control);
                        val64 |= ADAPTER_CNTL_EN;
                        writeq(val64, &bar0->adapter_control);
                        if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                                             subid)) {
                                /* Faulty-LED boards: drive GPIO 0 instead
                                 * of the LED bit; the trailing read
                                 * presumably posts/flushes the write —
                                 * TODO confirm. */
                                val64 = readq(&bar0->gpio_control);
                                val64 |= GPIO_CTRL_GPIO_0;
                                writeq(val64, &bar0->gpio_control);
                                val64 = readq(&bar0->gpio_control);
                        } else {
                                val64 |= ADAPTER_LED_ON;
                                writeq(val64, &bar0->adapter_control);
                        }
                        /* With timer-based fault indication, re-check the
                         * status register: the link may have dropped while
                         * we were enabling the device. */
                        if (s2io_link_fault_indication(nic) ==
                                                MAC_RMAC_ERR_TIMER) {
                                val64 = readq(&bar0->adapter_status);
                                if (!LINK_IS_UP(val64)) {
                                        DBG_PRINT(ERR_DBG, "%s:", dev->name);
                                        DBG_PRINT(ERR_DBG, " Link down");
                                        DBG_PRINT(ERR_DBG, "after ");
                                        DBG_PRINT(ERR_DBG, "enabling ");
                                        DBG_PRINT(ERR_DBG, "device \n");
                                }
                        }
                        /* Remember that the adapter was enabled at least
                         * once; verify_xena_quiescence() consults this. */
                        if (nic->device_enabled_once == FALSE) {
                                nic->device_enabled_once = TRUE;
                        }
                        s2io_link(nic, LINK_UP);
                } else {
                        /* Link down: turn the GPIO-driven indicator off on
                         * affected boards and report LINK_DOWN. */
                        if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                                              subid)) {
                                val64 = readq(&bar0->gpio_control);
                                val64 &= ~GPIO_CTRL_GPIO_0;
                                writeq(val64, &bar0->gpio_control);
                                val64 = readq(&bar0->gpio_control);
                        }
                        s2io_link(nic, LINK_DOWN);
                }
        } else {                /* NIC is not Quiescent. */
                DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
                DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
                netif_stop_queue(dev);
        }
        /* Release the link_state lock taken at entry. */
        clear_bit(0, &(nic->link_state));
}
5270
/*
 * Tear the adapter down: stop traffic, kill deferred work, reset the
 * hardware and release all Tx/Rx buffers. Counterpart of s2io_card_up().
 * Teardown order matters: alarm timer, link-task exclusion, stop_nic,
 * tasklet, quiescence check, reset, ISR drain, buffer free.
 */
static void s2io_card_down(nic_t * sp)
{
        int cnt = 0;
        XENA_dev_config_t __iomem *bar0 = sp->bar0;
        unsigned long flags;
        register u64 val64 = 0;

        del_timer_sync(&sp->alarm_timer);
        /* If s2io_set_link task is executing, wait till it completes. */
        while (test_and_set_bit(0, &(sp->link_state))) {
                msleep(50);
        }
        atomic_set(&sp->card_state, CARD_DOWN);

        /* disable Tx and Rx traffic on the NIC */
        stop_nic(sp);

        /* Kill tasklet. */
        tasklet_kill(&sp->task);

        /* Check if the device is Quiescent and then Reset the NIC.
         * Give up after 10 polls (~500 ms) and reset anyway. */
        do {
                val64 = readq(&bar0->adapter_status);
                if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
                        break;
                }

                msleep(50);
                cnt++;
                if (cnt == 10) {
                        DBG_PRINT(ERR_DBG,
                                  "s2io_close:Device not Quiescent ");
                        DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
                                  (unsigned long long) val64);
                        break;
                }
        } while (1);
        s2io_reset(sp);

        /* Waiting till all Interrupt handlers are complete.
         * Best-effort: polls isr_cnt up to 5 times (~50 ms). */
        cnt = 0;
        do {
                msleep(10);
                if (!atomic_read(&sp->isr_cnt))
                        break;
                cnt++;
        } while(cnt < 5);

        /* Free all Tx buffers under tx_lock so no xmit races us. */
        spin_lock_irqsave(&sp->tx_lock, flags);
        free_tx_buffers(sp);
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        /* Free all Rx buffers */
        spin_lock_irqsave(&sp->rx_lock, flags);
        free_rx_buffers(sp);
        spin_unlock_irqrestore(&sp->rx_lock, flags);

        /* Release the link_state lock so s2io_set_link may run again. */
        clear_bit(0, &(sp->link_state));
}
5331
/*
 * Bring the adapter up: program the H/W registers, enable MSI/MSI-X if
 * requested (falling back to INTA on failure), pre-fill the Rx rings,
 * set the receive mode, arm the refill tasklet, start traffic, and arm
 * the alarm timer. Returns 0 on success, -ENODEV/-ENOMEM on failure.
 */
static int s2io_card_up(nic_t * sp)
{
        int i, ret = 0;
        mac_info_t *mac_control;
        struct config_param *config;
        struct net_device *dev = (struct net_device *) sp->dev;

        /* Initialize the H/W I/O registers */
        if (init_nic(sp) != 0) {
                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                          dev->name);
                return -ENODEV;
        }

        /* Try the configured interrupt type; on failure fall back to
         * legacy INTA rather than failing the bring-up. */
        if (sp->intr_type == MSI)
                ret = s2io_enable_msi(sp);
        else if (sp->intr_type == MSI_X)
                ret = s2io_enable_msi_x(sp);
        if (ret) {
                DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
                sp->intr_type = INTA;
        }

        /*
         * Initializing the Rx buffers. For now we are considering only 1
         * Rx ring and initializing buffers into 30 Rx blocks
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        for (i = 0; i < config->rx_ring_num; i++) {
                if ((ret = fill_rx_buffers(sp, i))) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
                        /* Undo the H/W init and free whatever buffers
                         * earlier rings already allocated. */
                        s2io_reset(sp);
                        free_rx_buffers(sp);
                        return -ENOMEM;
                }
                DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
                          atomic_read(&sp->rx_bufs_left[i]));
        }

        /* Setting its receive mode */
        s2io_set_multicast(dev);

        /* Enable tasklet for the device */
        tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

        /* Enable Rx Traffic and interrupts on the NIC */
        if (start_nic(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
                tasklet_kill(&sp->task);
                s2io_reset(sp);
                /* NOTE(review): this function never calls request_irq(),
                 * so freeing dev->irq here looks like it belongs to the
                 * caller's error path — verify against s2io_open(). */
                free_irq(dev->irq, dev);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* Arm the periodic alarm handler (every HZ/2, i.e. 0.5 s). */
        S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

        atomic_set(&sp->card_state, CARD_UP);
        return 0;
}
5395
5396 /**
5397  * s2io_restart_nic - Resets the NIC.
5398  * @data : long pointer to the device private structure
5399  * Description:
5400  * This function is scheduled to be run by the s2io_tx_watchdog
5401  * function after 0.5 secs to reset the NIC. The idea is to reduce
5402  * the run time of the watch dog routine which is run holding a
5403  * spin lock.
5404  */
5405
5406 static void s2io_restart_nic(unsigned long data)
5407 {
5408         struct net_device *dev = (struct net_device *) data;
5409         nic_t *sp = dev->priv;
5410
5411         s2io_card_down(sp);
5412         if (s2io_card_up(sp)) {
5413                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5414                           dev->name);
5415         }
5416         netif_wake_queue(dev);
5417         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
5418                   dev->name);
5419
5420 }
5421
5422 /**
5423  *  s2io_tx_watchdog - Watchdog for transmit side.
5424  *  @dev : Pointer to net device structure
5425  *  Description:
5426  *  This function is triggered if the Tx Queue is stopped
5427  *  for a pre-defined amount of time when the Interface is still up.
5428  *  If the Interface is jammed in such a situation, the hardware is
5429  *  reset (by s2io_close) and restarted again (by s2io_open) to
5430  *  overcome any problem that might have been caused in the hardware.
5431  *  Return value:
5432  *  void
5433  */
5434
5435 static void s2io_tx_watchdog(struct net_device *dev)
5436 {
5437         nic_t *sp = dev->priv;
5438
5439         if (netif_carrier_ok(dev)) {
5440                 schedule_work(&sp->rst_timer_task);
5441         }
5442 }
5443
5444 /**
5445  *   rx_osm_handler - To perform some OS related operations on SKB.
5446  *   @sp: private member of the device structure,pointer to s2io_nic structure.
5447  *   @skb : the socket buffer pointer.
5448  *   @len : length of the packet
5449  *   @cksum : FCS checksum of the frame.
5450  *   @ring_no : the ring from which this RxD was extracted.
5451  *   Description:
5452  *   This function is called by the Tx interrupt serivce routine to perform
5453  *   some OS related operations on the SKB before passing it to the upper
5454  *   layers. It mainly checks if the checksum is OK, if so adds it to the
5455  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
5456  *   to the upper layer. If the checksum is wrong, it increments the Rx
5457  *   packet error count, frees the SKB and returns error.
5458  *   Return value:
5459  *   SUCCESS on success and -1 on failure.
5460  */
5461 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5462 {
5463         nic_t *sp = ring_data->nic;
5464         struct net_device *dev = (struct net_device *) sp->dev;
5465         struct sk_buff *skb = (struct sk_buff *)
5466                 ((unsigned long) rxdp->Host_Control);
5467         int ring_no = ring_data->ring_no;
5468         u16 l3_csum, l4_csum;
5469 #ifdef CONFIG_2BUFF_MODE
5470         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5471         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5472         int get_block = ring_data->rx_curr_get_info.block_index;
5473         int get_off = ring_data->rx_curr_get_info.offset;
5474         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5475         unsigned char *buff;
5476 #else
5477         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
5478 #endif
5479         skb->dev = dev;
5480         if (rxdp->Control_1 & RXD_T_CODE) {
5481                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5482                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5483                           dev->name, err);
5484                 dev_kfree_skb(skb);
5485                 sp->stats.rx_crc_errors++;
5486                 atomic_dec(&sp->rx_bufs_left[ring_no]);
5487                 rxdp->Host_Control = 0;
5488                 return 0;
5489         }
5490
5491         /* Updating statistics */
5492         rxdp->Host_Control = 0;
5493         sp->rx_pkt_count++;
5494         sp->stats.rx_packets++;
5495 #ifndef CONFIG_2BUFF_MODE
5496         sp->stats.rx_bytes += len;
5497 #else
5498         sp->stats.rx_bytes += buf0_len + buf2_len;
5499 #endif
5500
5501 #ifndef CONFIG_2BUFF_MODE
5502         skb_put(skb, len);
5503 #else
5504         buff = skb_push(skb, buf0_len);
5505         memcpy(buff, ba->ba_0, buf0_len);
5506         skb_put(skb, buf2_len);
5507 #endif
5508
5509         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5510             (sp->rx_csum)) {
5511                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5512                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5513                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5514                         /*
5515                          * NIC verifies if the Checksum of the received
5516                          * frame is Ok or not and accordingly returns
5517                          * a flag in the RxD.
5518                          */
5519                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5520                 } else {
5521                         /*
5522                          * Packet with erroneous checksum, let the
5523                          * upper layers deal with it.
5524                          */
5525                         skb->ip_summed = CHECKSUM_NONE;
5526                 }
5527         } else {
5528                 skb->ip_summed = CHECKSUM_NONE;
5529         }
5530
5531         skb->protocol = eth_type_trans(skb, dev);
5532 #ifdef CONFIG_S2IO_NAPI
5533         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5534                 /* Queueing the vlan frame to the upper layer */
5535                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5536                         RXD_GET_VLAN_TAG(rxdp->Control_2));
5537         } else {
5538                 netif_receive_skb(skb);
5539         }
5540 #else
5541         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5542                 /* Queueing the vlan frame to the upper layer */
5543                 vlan_hwaccel_rx(skb, sp->vlgrp,
5544                         RXD_GET_VLAN_TAG(rxdp->Control_2));
5545         } else {
5546                 netif_rx(skb);
5547         }
5548 #endif
5549         dev->last_rx = jiffies;
5550         atomic_dec(&sp->rx_bufs_left[ring_no]);
5551         return SUCCESS;
5552 }
5553
5554 /**
5555  *  s2io_link - stops/starts the Tx queue.
5556  *  @sp : private member of the device structure, which is a pointer to the
5557  *  s2io_nic structure.
5558  *  @link : inidicates whether link is UP/DOWN.
5559  *  Description:
5560  *  This function stops/starts the Tx queue depending on whether the link
5561  *  status of the NIC is is down or up. This is called by the Alarm
5562  *  interrupt handler whenever a link change interrupt comes up.
5563  *  Return value:
5564  *  void.
5565  */
5566
5567 void s2io_link(nic_t * sp, int link)
5568 {
5569         struct net_device *dev = (struct net_device *) sp->dev;
5570
5571         if (link != sp->last_link_state) {
5572                 if (link == LINK_DOWN) {
5573                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5574                         netif_carrier_off(dev);
5575                 } else {
5576                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5577                         netif_carrier_on(dev);
5578                 }
5579         }
5580         sp->last_link_state = link;
5581 }
5582
5583 /**
5584  *  get_xena_rev_id - to identify revision ID of xena.
5585  *  @pdev : PCI Dev structure
5586  *  Description:
5587  *  Function to identify the Revision ID of xena.
5588  *  Return value:
5589  *  returns the revision ID of the device.
5590  */
5591
5592 int get_xena_rev_id(struct pci_dev *pdev)
5593 {
5594         u8 id = 0;
5595         int ret;
5596         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5597         return id;
5598 }
5599
5600 /**
5601  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5602  *  @sp : private member of the device structure, which is a pointer to the
5603  *  s2io_nic structure.
5604  *  Description:
5605  *  This function initializes a few of the PCI and PCI-X configuration registers
5606  *  with recommended values.
5607  *  Return value:
5608  *  void
5609  */
5610
/*
 * Program recommended values into a few PCI / PCI-X config registers:
 * data-parity-error recovery, PErr response, and relaxed ordering off.
 * Each write is followed by a read-back of the same register —
 * presumably to post the write and refresh the cached value; keep the
 * sequence order intact.
 */
static void s2io_init_pci(nic_t * sp)
{
        u16 pci_cmd = 0, pcix_cmd = 0;

        /* Enable Data Parity Error Recovery in PCI-X command register
         * (bit 0 of the PCI-X command register). */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                             &(pcix_cmd));
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                              (pcix_cmd | 1));
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                             &(pcix_cmd));

        /* Set the PErr Response bit in PCI command register. */
        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(sp->pdev, PCI_COMMAND,
                              (pci_cmd | PCI_COMMAND_PARITY));
        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);

        /* Forcibly disabling relaxed ordering capability of the card
         * (clears bit 1 of the PCI-X command register). */
        pcix_cmd &= 0xfffd;
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                              pcix_cmd);
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
                             &(pcix_cmd));
}
5636
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");

/*
 * Module load-time parameters (see the file-head comment for the full
 * description of each). The third argument is the sysfs permission
 * mask; 0 means the parameter is not exposed in sysfs.
 */
module_param(tx_fifo_num, int, 0);
module_param(rx_ring_num, int, 0);
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/* NOTE(review): perm value 1 (execute bit) is an odd sysfs mode —
 * looks like it was meant to be 0; confirm before changing. */
module_param(use_continuous_tx_intrs, int, 1);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
module_param(bimodal, bool, 0);
#ifndef CONFIG_S2IO_NAPI
/* Max packets indicated per Rx interrupt — only meaningful without NAPI. */
module_param(indicate_max_pkts, int, 0);
#endif
module_param(rxsync_frequency, int, 0);
/* 0: INTA, 1: MSI, 2: MSI-X (see intr_type handling in s2io_init_nic). */
module_param(intr_type, int, 0);
5657
5658 /**
5659  *  s2io_init_nic - Initialization of the adapter .
5660  *  @pdev : structure containing the PCI related information of the device.
5661  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5662  *  Description:
5663  *  The function initializes an adapter identified by the pci_dec structure.
5664  *  All OS related initialization including memory and device structure and
5665  *  initlaization of the device private variable is done. Also the swapper
5666  *  control register is initialized to enable read and write into the I/O
5667  *  registers of the device.
5668  *  Return value:
5669  *  returns 0 on success and negative on failure.
5670  */
5671
5672 static int __devinit
5673 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5674 {
5675         nic_t *sp;
5676         struct net_device *dev;
5677         int i, j, ret;
5678         int dma_flag = FALSE;
5679         u32 mac_up, mac_down;
5680         u64 val64 = 0, tmp64 = 0;
5681         XENA_dev_config_t __iomem *bar0 = NULL;
5682         u16 subid;
5683         mac_info_t *mac_control;
5684         struct config_param *config;
5685         int mode;
5686         u8 dev_intr_type = intr_type;
5687
5688 #ifdef CONFIG_S2IO_NAPI
5689         if (dev_intr_type != INTA) {
5690                 DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
5691 is enabled. Defaulting to INTA\n");
5692                 dev_intr_type = INTA;
5693         }
5694         else
5695                 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5696 #endif
5697
5698         if ((ret = pci_enable_device(pdev))) {
5699                 DBG_PRINT(ERR_DBG,
5700                           "s2io_init_nic: pci_enable_device failed\n");
5701                 return ret;
5702         }
5703
5704         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5705                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5706                 dma_flag = TRUE;
5707                 if (pci_set_consistent_dma_mask
5708                     (pdev, DMA_64BIT_MASK)) {
5709                         DBG_PRINT(ERR_DBG,
5710                                   "Unable to obtain 64bit DMA for \
5711                                         consistent allocations\n");
5712                         pci_disable_device(pdev);
5713                         return -ENOMEM;
5714                 }
5715         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5716                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5717         } else {
5718                 pci_disable_device(pdev);
5719                 return -ENOMEM;
5720         }
5721
5722         if ((dev_intr_type == MSI_X) && 
5723                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
5724                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
5725                 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
5726 Defaulting to INTA\n");
5727                 dev_intr_type = INTA;
5728         }
5729         if (dev_intr_type != MSI_X) {
5730                 if (pci_request_regions(pdev, s2io_driver_name)) {
5731                         DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5732                             pci_disable_device(pdev);
5733                         return -ENODEV;
5734                 }
5735         }
5736         else {
5737                 if (!(request_mem_region(pci_resource_start(pdev, 0),
5738                          pci_resource_len(pdev, 0), s2io_driver_name))) {
5739                         DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
5740                         pci_disable_device(pdev);
5741                         return -ENODEV;
5742                 }
5743                 if (!(request_mem_region(pci_resource_start(pdev, 2),
5744                          pci_resource_len(pdev, 2), s2io_driver_name))) {
5745                         DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
5746                         release_mem_region(pci_resource_start(pdev, 0),
5747                                    pci_resource_len(pdev, 0));
5748                         pci_disable_device(pdev);
5749                         return -ENODEV;
5750                 }
5751         }
5752
5753         dev = alloc_etherdev(sizeof(nic_t));
5754         if (dev == NULL) {
5755                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5756                 pci_disable_device(pdev);
5757                 pci_release_regions(pdev);
5758                 return -ENODEV;
5759         }
5760
5761         pci_set_master(pdev);
5762         pci_set_drvdata(pdev, dev);
5763         SET_MODULE_OWNER(dev);
5764         SET_NETDEV_DEV(dev, &pdev->dev);
5765
5766         /*  Private member variable initialized to s2io NIC structure */
5767         sp = dev->priv;
5768         memset(sp, 0, sizeof(nic_t));
5769         sp->dev = dev;
5770         sp->pdev = pdev;
5771         sp->high_dma_flag = dma_flag;
5772         sp->device_enabled_once = FALSE;
5773         sp->intr_type = dev_intr_type;
5774
5775         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5776                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5777                 sp->device_type = XFRAME_II_DEVICE;
5778         else
5779                 sp->device_type = XFRAME_I_DEVICE;
5780
5781                 
5782         /* Initialize some PCI/PCI-X fields of the NIC. */
5783         s2io_init_pci(sp);
5784
5785         /*
5786          * Setting the device configuration parameters.
5787          * Most of these parameters can be specified by the user during
5788          * module insertion as they are module loadable parameters. If
5789          * these parameters are not not specified during load time, they
5790          * are initialized with default values.
5791          */
5792         mac_control = &sp->mac_control;
5793         config = &sp->config;
5794
5795         /* Tx side parameters. */
5796         if (tx_fifo_len[0] == 0)
5797                 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5798         config->tx_fifo_num = tx_fifo_num;
5799         for (i = 0; i < MAX_TX_FIFOS; i++) {
5800                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5801                 config->tx_cfg[i].fifo_priority = i;
5802         }
5803
5804         /* mapping the QoS priority to the configured fifos */
5805         for (i = 0; i < MAX_TX_FIFOS; i++)
5806                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5807
5808         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5809         for (i = 0; i < config->tx_fifo_num; i++) {
5810                 config->tx_cfg[i].f_no_snoop =
5811                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5812                 if (config->tx_cfg[i].fifo_len < 65) {
5813                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5814                         break;
5815                 }
5816         }
5817         config->max_txds = MAX_SKB_FRAGS + 1;
5818
5819         /* Rx side parameters. */
5820         if (rx_ring_sz[0] == 0)
5821                 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5822         config->rx_ring_num = rx_ring_num;
5823         for (i = 0; i < MAX_RX_RINGS; i++) {
5824                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5825                     (MAX_RXDS_PER_BLOCK + 1);
5826                 config->rx_cfg[i].ring_priority = i;
5827         }
5828
5829         for (i = 0; i < rx_ring_num; i++) {
5830                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5831                 config->rx_cfg[i].f_no_snoop =
5832                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5833         }
5834
5835         /*  Setting Mac Control parameters */
5836         mac_control->rmac_pause_time = rmac_pause_time;
5837         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5838         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5839
5840
5841         /* Initialize Ring buffer parameters. */
5842         for (i = 0; i < config->rx_ring_num; i++)
5843                 atomic_set(&sp->rx_bufs_left[i], 0);
5844
5845         /* Initialize the number of ISRs currently running */
5846         atomic_set(&sp->isr_cnt, 0);
5847
5848         /*  initialize the shared memory used by the NIC and the host */
5849         if (init_shared_mem(sp)) {
5850                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5851                           __FUNCTION__);
5852                 ret = -ENOMEM;
5853                 goto mem_alloc_failed;
5854         }
5855
5856         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5857                                      pci_resource_len(pdev, 0));
5858         if (!sp->bar0) {
5859                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5860                           dev->name);
5861                 ret = -ENOMEM;
5862                 goto bar0_remap_failed;
5863         }
5864
5865         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5866                                      pci_resource_len(pdev, 2));
5867         if (!sp->bar1) {
5868                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5869                           dev->name);
5870                 ret = -ENOMEM;
5871                 goto bar1_remap_failed;
5872         }
5873
5874         dev->irq = pdev->irq;
5875         dev->base_addr = (unsigned long) sp->bar0;
5876
5877         /* Initializing the BAR1 address as the start of the FIFO pointer. */
5878         for (j = 0; j < MAX_TX_FIFOS; j++) {
5879                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5880                     (sp->bar1 + (j * 0x00020000));
5881         }
5882
5883         /*  Driver entry points */
5884         dev->open = &s2io_open;
5885         dev->stop = &s2io_close;
5886         dev->hard_start_xmit = &s2io_xmit;
5887         dev->get_stats = &s2io_get_stats;
5888         dev->set_multicast_list = &s2io_set_multicast;
5889         dev->do_ioctl = &s2io_ioctl;
5890         dev->change_mtu = &s2io_change_mtu;
5891         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5892         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5893         dev->vlan_rx_register = s2io_vlan_rx_register;
5894         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5895
5896         /*
5897          * will use eth_mac_addr() for  dev->set_mac_address
5898          * mac address will be set every time dev->open() is called
5899          */
5900 #if defined(CONFIG_S2IO_NAPI)
5901         dev->poll = s2io_poll;
5902         dev->weight = 32;
5903 #endif
5904
5905         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5906         if (sp->high_dma_flag == TRUE)
5907                 dev->features |= NETIF_F_HIGHDMA;
5908 #ifdef NETIF_F_TSO
5909         dev->features |= NETIF_F_TSO;
5910 #endif
5911
5912         dev->tx_timeout = &s2io_tx_watchdog;
5913         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5914         INIT_WORK(&sp->rst_timer_task,
5915                   (void (*)(void *)) s2io_restart_nic, dev);
5916         INIT_WORK(&sp->set_link_task,
5917                   (void (*)(void *)) s2io_set_link, sp);
5918
5919         pci_save_state(sp->pdev);
5920
5921         /* Setting swapper control on the NIC, for proper reset operation */
5922         if (s2io_set_swapper(sp)) {
5923                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5924                           dev->name);
5925                 ret = -EAGAIN;
5926                 goto set_swap_failed;
5927         }
5928
5929         /* Verify if the Herc works on the slot its placed into */
5930         if (sp->device_type & XFRAME_II_DEVICE) {
5931                 mode = s2io_verify_pci_mode(sp);
5932                 if (mode < 0) {
5933                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5934                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5935                         ret = -EBADSLT;
5936                         goto set_swap_failed;
5937                 }
5938         }
5939
5940         /* Not needed for Herc */
5941         if (sp->device_type & XFRAME_I_DEVICE) {
5942                 /*
5943                  * Fix for all "FFs" MAC address problems observed on
5944                  * Alpha platforms
5945                  */
5946                 fix_mac_address(sp);
5947                 s2io_reset(sp);
5948         }
5949
5950         /*
5951          * MAC address initialization.
5952          * For now only one mac address will be read and used.
5953          */
5954         bar0 = sp->bar0;
5955         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5956             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5957         writeq(val64, &bar0->rmac_addr_cmd_mem);
5958         wait_for_cmd_complete(sp);
5959
5960         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5961         mac_down = (u32) tmp64;
5962         mac_up = (u32) (tmp64 >> 32);
5963
5964         memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
5965
5966         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5967         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5968         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5969         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5970         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5971         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5972
5973         /*  Set the factory defined MAC address initially   */
5974         dev->addr_len = ETH_ALEN;
5975         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5976
5977         /*
5978          * Initialize the tasklet status and link state flags
5979          * and the card state parameter
5980          */
5981         atomic_set(&(sp->card_state), 0);
5982         sp->tasklet_status = 0;
5983         sp->link_state = 0;
5984
5985         /* Initialize spinlocks */
5986         spin_lock_init(&sp->tx_lock);
5987 #ifndef CONFIG_S2IO_NAPI
5988         spin_lock_init(&sp->put_lock);
5989 #endif
5990         spin_lock_init(&sp->rx_lock);
5991
5992         /*
5993          * SXE-002: Configure link and activity LED to init state
5994          * on driver load.
5995          */
5996         subid = sp->pdev->subsystem_device;
5997         if ((subid & 0xFF) >= 0x07) {
5998                 val64 = readq(&bar0->gpio_control);
5999                 val64 |= 0x0000800000000000ULL;
6000                 writeq(val64, &bar0->gpio_control);
6001                 val64 = 0x0411040400000000ULL;
6002                 writeq(val64, (void __iomem *) bar0 + 0x2700);
6003                 val64 = readq(&bar0->gpio_control);
6004         }
6005
6006         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
6007
6008         if (register_netdev(dev)) {
6009                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
6010                 ret = -ENODEV;
6011                 goto register_failed;
6012         }
6013
6014         if (sp->device_type & XFRAME_II_DEVICE) {
6015                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
6016                           dev->name);
6017                 DBG_PRINT(ERR_DBG, "(rev %d), %s",
6018                                 get_xena_rev_id(sp->pdev),
6019                                 s2io_driver_version);
6020 #ifdef CONFIG_2BUFF_MODE
6021                 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6022 #endif
6023                 switch(sp->intr_type) {
6024                         case INTA:
6025                                 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6026                                 break;
6027                         case MSI:
6028                                 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6029                                 break;
6030                         case MSI_X:
6031                                 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6032                                 break;
6033                 }
6034
6035                 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6036                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6037                           sp->def_mac_addr[0].mac_addr[0],
6038                           sp->def_mac_addr[0].mac_addr[1],
6039                           sp->def_mac_addr[0].mac_addr[2],
6040                           sp->def_mac_addr[0].mac_addr[3],
6041                           sp->def_mac_addr[0].mac_addr[4],
6042                           sp->def_mac_addr[0].mac_addr[5]);
6043                 mode = s2io_print_pci_mode(sp);
6044                 if (mode < 0) {
6045                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
6046                         ret = -EBADSLT;
6047                         goto set_swap_failed;
6048                 }
6049         } else {
6050                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
6051                           dev->name);
6052                 DBG_PRINT(ERR_DBG, "(rev %d), %s",
6053                                         get_xena_rev_id(sp->pdev),
6054                                         s2io_driver_version);
6055 #ifdef CONFIG_2BUFF_MODE
6056                 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6057 #endif
6058                 switch(sp->intr_type) {
6059                         case INTA:
6060                                 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6061                                 break;
6062                         case MSI:
6063                                 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6064                                 break;
6065                         case MSI_X:
6066                                 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6067                                 break;
6068                 }
6069                 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6070                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6071                           sp->def_mac_addr[0].mac_addr[0],
6072                           sp->def_mac_addr[0].mac_addr[1],
6073                           sp->def_mac_addr[0].mac_addr[2],
6074                           sp->def_mac_addr[0].mac_addr[3],
6075                           sp->def_mac_addr[0].mac_addr[4],
6076                           sp->def_mac_addr[0].mac_addr[5]);
6077         }
6078
6079         /* Initialize device name */
6080         strcpy(sp->name, dev->name);
6081         if (sp->device_type & XFRAME_II_DEVICE)
6082                 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
6083         else
6084                 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
6085
6086         /* Initialize bimodal Interrupts */
6087         sp->config.bimodal = bimodal;
6088         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
6089                 sp->config.bimodal = 0;
6090                 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
6091                         dev->name);
6092         }
6093
6094         /*
6095          * Make Link state as off at this point, when the Link change
6096          * interrupt comes the state will be automatically changed to
6097          * the right state.
6098          */
6099         netif_carrier_off(dev);
6100
6101         return 0;
6102
6103       register_failed:
6104       set_swap_failed:
6105         iounmap(sp->bar1);
6106       bar1_remap_failed:
6107         iounmap(sp->bar0);
6108       bar0_remap_failed:
6109       mem_alloc_failed:
6110         free_shared_mem(sp);
6111         pci_disable_device(pdev);
6112         if (dev_intr_type != MSI_X)
6113                 pci_release_regions(pdev);
6114         else {
6115                 release_mem_region(pci_resource_start(pdev, 0),
6116                         pci_resource_len(pdev, 0));
6117                 release_mem_region(pci_resource_start(pdev, 2),
6118                         pci_resource_len(pdev, 2));
6119         }
6120         pci_set_drvdata(pdev, NULL);
6121         free_netdev(dev);
6122
6123         return ret;
6124 }
6125
6126 /**
6127  * s2io_rem_nic - Free the PCI device
6128  * @pdev: structure containing the PCI related information of the device.
6129  * Description: This function is called by the Pci subsystem to release a
6130  * PCI device and free up all resource held up by the device. This could
6131  * be in response to a Hot plug event or when the driver is to be removed
6132  * from memory.
6133  */
6134
6135 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
6136 {
6137         struct net_device *dev =
6138             (struct net_device *) pci_get_drvdata(pdev);
6139         nic_t *sp;
6140
6141         if (dev == NULL) {
6142                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
6143                 return;
6144         }
6145
6146         sp = dev->priv;
6147         unregister_netdev(dev);
6148
6149         free_shared_mem(sp);
6150         iounmap(sp->bar0);
6151         iounmap(sp->bar1);
6152         pci_disable_device(pdev);
6153         if (sp->intr_type != MSI_X)
6154                 pci_release_regions(pdev);
6155         else {
6156                 release_mem_region(pci_resource_start(pdev, 0),
6157                         pci_resource_len(pdev, 0));
6158                 release_mem_region(pci_resource_start(pdev, 2),
6159                         pci_resource_len(pdev, 2));
6160         }
6161         pci_set_drvdata(pdev, NULL);
6162         free_netdev(dev);
6163 }
6164
6165 /**
6166  * s2io_starter - Entry point for the driver
6167  * Description: This function is the entry point for the driver. It verifies
6168  * the module loadable parameters and initializes PCI configuration space.
6169  */
6170
6171 int __init s2io_starter(void)
6172 {
6173         return pci_module_init(&s2io_driver);
6174 }
6175
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the PCI driver and logs completion at init-debug level.
 */

void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
6186
/* Wire the driver's entry and exit routines into the module loader */
module_init(s2io_starter);
module_exit(s2io_closer);