1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watchdog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all the code parts that were
22  *                        deprecated, and also for styling-related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_len: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
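
/*
 * Illustrative usage of the parameters described above (a sketch only:
 * which of these variables are actually exported as module parameters,
 * and under what names, is determined by the MODULE_PARM/module_param
 * declarations further down in this file):
 *
 *   modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2
 *
 * Array parameters such as tx_fifo_len take one comma-separated value per
 * FIFO/ring; entries left at 0 are presumed to fall back to the driver
 * defaults.
 */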
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
61 #include <asm/io.h>
62
63 /* local include */
64 #include "s2io.h"
65 #include "s2io-regs.h"
66
67 /* S2io Driver name & version. */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
70
71 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
72 {
73         int ret;
74
75         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
76                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
77
78         return ret;
79 }
80
81 /*
82  * Cards with the following subsystem_ids have a link state indication
83  * problem: 600B, 600C, 600D, 640B, 640C and 640D.
84  * The macro below identifies these cards given the subsystem_id.
85  */
86 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
87                 (((subid >= 0x600B) && (subid <= 0x600D)) || \
88                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
89
90 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
91                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
92 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
93 #define PANIC   1
94 #define LOW     2
95 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
96 {
97         int level = 0;
98         mac_info_t *mac_control;
99
100         mac_control = &sp->mac_control;
101         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
102                 level = LOW;
103                 if ((mac_control->rings[ring].pkt_cnt - rxb_size) <
104                                 MAX_RXDS_PER_BLOCK) {
105                         level = PANIC;
106                 }
107         }
108
109         return level;
110 }
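
/*
 * Note on rx_buffer_level(): (pkt_cnt - rxb_size) appears to be the number
 * of Rx buffers the ring is currently short of.  A shortfall of more than
 * 16 is reported as LOW, and if that shortfall is also smaller than
 * MAX_RXDS_PER_BLOCK the level escalates to PANIC; otherwise 0 is returned.
 */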
111
112 /* Ethtool related variables and Macros. */
113 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
114         "Register test\t(offline)",
115         "Eeprom test\t(offline)",
116         "Link test\t(online)",
117         "RLDRAM test\t(offline)",
118         "BIST Test\t(offline)"
119 };
120
121 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
122         {"tmac_frms"},
123         {"tmac_data_octets"},
124         {"tmac_drop_frms"},
125         {"tmac_mcst_frms"},
126         {"tmac_bcst_frms"},
127         {"tmac_pause_ctrl_frms"},
128         {"tmac_any_err_frms"},
129         {"tmac_vld_ip_octets"},
130         {"tmac_vld_ip"},
131         {"tmac_drop_ip"},
132         {"tmac_icmp"},
133         {"tmac_rst_tcp"},
134         {"tmac_tcp"},
135         {"tmac_udp"},
136         {"rmac_vld_frms"},
137         {"rmac_data_octets"},
138         {"rmac_fcs_err_frms"},
139         {"rmac_drop_frms"},
140         {"rmac_vld_mcst_frms"},
141         {"rmac_vld_bcst_frms"},
142         {"rmac_in_rng_len_err_frms"},
143         {"rmac_long_frms"},
144         {"rmac_pause_ctrl_frms"},
145         {"rmac_discarded_frms"},
146         {"rmac_usized_frms"},
147         {"rmac_osized_frms"},
148         {"rmac_frag_frms"},
149         {"rmac_jabber_frms"},
150         {"rmac_ip"},
151         {"rmac_ip_octets"},
152         {"rmac_hdr_err_ip"},
153         {"rmac_drop_ip"},
154         {"rmac_icmp"},
155         {"rmac_tcp"},
156         {"rmac_udp"},
157         {"rmac_err_drp_udp"},
158         {"rmac_pause_cnt"},
159         {"rmac_accepted_ip"},
160         {"rmac_err_tcp"},
161 };
162
163 #define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
164 #define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
165
166 #define S2IO_TEST_LEN   (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
167 #define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
168
169 /*
170  * Constants to be programmed into the Xena's registers, to configure
171  * the XAUI.
172  */
173
174 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
175 #define END_SIGN        0x0
176
177 static u64 default_mdio_cfg[] = {
178         /* Reset PMA PLL */
179         0xC001010000000000ULL, 0xC0010100000000E0ULL,
180         0xC0010100008000E4ULL,
181         /* Remove Reset from PMA PLL */
182         0xC001010000000000ULL, 0xC0010100000000E0ULL,
183         0xC0010100000000E4ULL,
184         END_SIGN
185 };
186
187 static u64 default_dtx_cfg[] = {
188         0x8000051500000000ULL, 0x80000515000000E0ULL,
189         0x80000515D93500E4ULL, 0x8001051500000000ULL,
190         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
191         0x8002051500000000ULL, 0x80020515000000E0ULL,
192         0x80020515F21000E4ULL,
193         /* Set PADLOOPBACKN */
194         0x8002051500000000ULL, 0x80020515000000E0ULL,
195         0x80020515B20000E4ULL, 0x8003051500000000ULL,
196         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
197         0x8004051500000000ULL, 0x80040515000000E0ULL,
198         0x80040515B20000E4ULL, 0x8005051500000000ULL,
199         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
200         SWITCH_SIGN,
201         /* Remove PADLOOPBACKN */
202         0x8002051500000000ULL, 0x80020515000000E0ULL,
203         0x80020515F20000E4ULL, 0x8003051500000000ULL,
204         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
205         0x8004051500000000ULL, 0x80040515000000E0ULL,
206         0x80040515F20000E4ULL, 0x8005051500000000ULL,
207         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
208         END_SIGN
209 };
210
211 /*
212  * Constants for fixing the MAC address problem seen mostly on
213  * Alpha machines.
214  */
215 static u64 fix_mac[] = {
216         0x0060000000000000ULL, 0x0060600000000000ULL,
217         0x0040600000000000ULL, 0x0000600000000000ULL,
218         0x0020600000000000ULL, 0x0060600000000000ULL,
219         0x0020600000000000ULL, 0x0060600000000000ULL,
220         0x0020600000000000ULL, 0x0060600000000000ULL,
221         0x0020600000000000ULL, 0x0060600000000000ULL,
222         0x0020600000000000ULL, 0x0060600000000000ULL,
223         0x0020600000000000ULL, 0x0060600000000000ULL,
224         0x0020600000000000ULL, 0x0060600000000000ULL,
225         0x0020600000000000ULL, 0x0060600000000000ULL,
226         0x0020600000000000ULL, 0x0060600000000000ULL,
227         0x0020600000000000ULL, 0x0060600000000000ULL,
228         0x0020600000000000ULL, 0x0000600000000000ULL,
229         0x0040600000000000ULL, 0x0060600000000000ULL,
230         END_SIGN
231 };
232
233 /* Module Loadable parameters. */
234 static unsigned int tx_fifo_num = 1;
235 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
236     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
237 static unsigned int rx_ring_num = 1;
238 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
239     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
240 static unsigned int Stats_refresh_time = 4;
241 static unsigned int rts_frm_len[MAX_RX_RINGS] =
242     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
243 static unsigned int use_continuous_tx_intrs = 1;
244 static unsigned int rmac_pause_time = 65535;
245 static unsigned int mc_pause_threshold_q0q3 = 187;
246 static unsigned int mc_pause_threshold_q4q7 = 187;
247 static unsigned int shared_splits;
248 static unsigned int tmac_util_period = 5;
249 static unsigned int rmac_util_period = 5;
250 #ifndef CONFIG_S2IO_NAPI
251 static unsigned int indicate_max_pkts;
252 #endif
253
254 /*
255  * S2IO device table.
256  * This table lists all the devices that this driver supports.
257  */
258 static struct pci_device_id s2io_tbl[] __devinitdata = {
259         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
260          PCI_ANY_ID, PCI_ANY_ID},
261         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
262          PCI_ANY_ID, PCI_ANY_ID},
263         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
264          PCI_ANY_ID, PCI_ANY_ID},
265         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
266          PCI_ANY_ID, PCI_ANY_ID},
267         {0,}
268 };
269
270 MODULE_DEVICE_TABLE(pci, s2io_tbl);
271
272 static struct pci_driver s2io_driver = {
273       .name = "S2IO",
274       .id_table = s2io_tbl,
275       .probe = s2io_init_nic,
276       .remove = __devexit_p(s2io_rem_nic),
277 };
278
279 /* A helper macro used by both the init and free shared_mem functions. */
280 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
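
/*
 * TXD_MEM_PAGE_CNT() is a ceiling division.  For illustration (values
 * assumed, not taken from a real configuration): with lst_per_page = 2
 * descriptor lists per page, a FIFO of 5 lists needs
 * TXD_MEM_PAGE_CNT(5, 2) = (5 + 2 - 1) / 2 = 3 pages.
 */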
281
282 /**
283  * init_shared_mem - Allocation and Initialization of Memory
284  * @nic: Device private variable.
285  * Description: The function allocates all the memory areas shared
286  * between the NIC and the driver. This includes Tx descriptors,
287  * Rx descriptors and the statistics block.
288  */
289
290 static int init_shared_mem(struct s2io_nic *nic)
291 {
292         u32 size;
293         void *tmp_v_addr, *tmp_v_addr_next;
294         dma_addr_t tmp_p_addr, tmp_p_addr_next;
295         RxD_block_t *pre_rxd_blk = NULL;
296         int i, j, blk_cnt, rx_sz, tx_sz;
297         int lst_size, lst_per_page;
298         struct net_device *dev = nic->dev;
299 #ifdef CONFIG_2BUFF_MODE
300         u64 tmp;
301         buffAdd_t *ba;
302 #endif
303
304         mac_info_t *mac_control;
305         struct config_param *config;
306
307         mac_control = &nic->mac_control;
308         config = &nic->config;
309
310
311         /* Allocation and initialization of TXDLs in FIFOs */
312         size = 0;
313         for (i = 0; i < config->tx_fifo_num; i++) {
314                 size += config->tx_cfg[i].fifo_len;
315         }
316         if (size > MAX_AVAILABLE_TXDS) {
317                 DBG_PRINT(ERR_DBG, "%s: Requested number of Tx descriptors ",
318                           dev->name);
319                 DBG_PRINT(ERR_DBG, "exceeds the maximum number ");
320                 DBG_PRINT(ERR_DBG, "that can be used\n");
321                 return FAILURE;
322         }
323
324         lst_size = (sizeof(TxD_t) * config->max_txds);
325         tx_sz = lst_size * size;
326         lst_per_page = PAGE_SIZE / lst_size;
327
328         for (i = 0; i < config->tx_fifo_num; i++) {
329                 int fifo_len = config->tx_cfg[i].fifo_len;
330                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
331                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
332                                                           GFP_KERNEL);
333                 if (!mac_control->fifos[i].list_info) {
334                         DBG_PRINT(ERR_DBG,
335                                   "Malloc failed for list_info\n");
336                         return -ENOMEM;
337                 }
338                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
339         }
340         for (i = 0; i < config->tx_fifo_num; i++) {
341                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
342                                                 lst_per_page);
343                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
344                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
345                     config->tx_cfg[i].fifo_len - 1;
346                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
347                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
348                     config->tx_cfg[i].fifo_len - 1;
349                 mac_control->fifos[i].fifo_no = i;
350                 mac_control->fifos[i].nic = nic;
351                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
352
353                 for (j = 0; j < page_num; j++) {
354                         int k = 0;
355                         dma_addr_t tmp_p;
356                         void *tmp_v;
357                         tmp_v = pci_alloc_consistent(nic->pdev,
358                                                      PAGE_SIZE, &tmp_p);
359                         if (!tmp_v) {
360                                 DBG_PRINT(ERR_DBG,
361                                           "pci_alloc_consistent ");
362                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
363                                 return -ENOMEM;
364                         }
365                         while (k < lst_per_page) {
366                                 int l = (j * lst_per_page) + k;
367                                 if (l == config->tx_cfg[i].fifo_len)
368                                         break;
369                                 mac_control->fifos[i].list_info[l].list_virt_addr =
370                                     tmp_v + (k * lst_size);
371                                 mac_control->fifos[i].list_info[l].list_phy_addr =
372                                     tmp_p + (k * lst_size);
373                                 k++;
374                         }
375                 }
376         }
377
378         /* Allocation and initialization of RXDs in Rings */
379         size = 0;
380         for (i = 0; i < config->rx_ring_num; i++) {
381                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
382                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
383                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
384                                   i);
385                         DBG_PRINT(ERR_DBG, "RxDs per Block");
386                         return FAILURE;
387                 }
388                 size += config->rx_cfg[i].num_rxd;
389                 mac_control->rings[i].block_count =
390                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
391                 mac_control->rings[i].pkt_cnt =
392                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
393         }
394         size = (size * (sizeof(RxD_t)));
395         rx_sz = size;
396
397         for (i = 0; i < config->rx_ring_num; i++) {
398                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
399                 mac_control->rings[i].rx_curr_get_info.offset = 0;
400                 mac_control->rings[i].rx_curr_get_info.ring_len =
401                     config->rx_cfg[i].num_rxd - 1;
402                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
403                 mac_control->rings[i].rx_curr_put_info.offset = 0;
404                 mac_control->rings[i].rx_curr_put_info.ring_len =
405                     config->rx_cfg[i].num_rxd - 1;
406                 mac_control->rings[i].nic = nic;
407                 mac_control->rings[i].ring_no = i;
408
409                 blk_cnt =
410                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
411                 /*  Allocating all the Rx blocks */
412                 for (j = 0; j < blk_cnt; j++) {
413 #ifndef CONFIG_2BUFF_MODE
414                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
415 #else
416                         size = SIZE_OF_BLOCK;
417 #endif
418                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
419                                                           &tmp_p_addr);
420                         if (tmp_v_addr == NULL) {
421                                 /*
422                                  * In case of failure, free_shared_mem()
423                                  * is called, which should free any
424                                  * memory that was alloced till the
425                                  * failure happened.
426                                  */
427                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
428                                     tmp_v_addr;
429                                 return -ENOMEM;
430                         }
431                         memset(tmp_v_addr, 0, size);
432                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
433                                 tmp_v_addr;
434                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
435                                 tmp_p_addr;
436                 }
437                 /* Interlinking all Rx Blocks */
438                 for (j = 0; j < blk_cnt; j++) {
439                         tmp_v_addr =
440                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
441                         tmp_v_addr_next =
442                                 mac_control->rings[i].rx_blocks[(j + 1) %
443                                               blk_cnt].block_virt_addr;
444                         tmp_p_addr =
445                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
446                         tmp_p_addr_next =
447                                 mac_control->rings[i].rx_blocks[(j + 1) %
448                                               blk_cnt].block_dma_addr;
449
450                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
451                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
452                                                                  * marker.
453                                                                  */
454 #ifndef CONFIG_2BUFF_MODE
455                         pre_rxd_blk->reserved_2_pNext_RxD_block =
456                             (unsigned long) tmp_v_addr_next;
457 #endif
458                         pre_rxd_blk->pNext_RxD_Blk_physical =
459                             (u64) tmp_p_addr_next;
460                 }
461         }
462
463 #ifdef CONFIG_2BUFF_MODE
464         /*
465          * Allocation of storage for the buffer addresses in 2BUFF mode,
466          * and of the buffers themselves.
467          */
468         for (i = 0; i < config->rx_ring_num; i++) {
469                 blk_cnt =
470                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
471                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
472                                      GFP_KERNEL);
473                 if (!mac_control->rings[i].ba)
474                         return -ENOMEM;
475                 for (j = 0; j < blk_cnt; j++) {
476                         int k = 0;
477                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
478                                                  (MAX_RXDS_PER_BLOCK + 1)),
479                                                 GFP_KERNEL);
480                         if (!mac_control->rings[i].ba[j])
481                                 return -ENOMEM;
482                         while (k != MAX_RXDS_PER_BLOCK) {
483                                 ba = &mac_control->rings[i].ba[j][k];
484
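                                /*
                                 * Over-allocate by ALIGN_SIZE, then round the
                                 * pointer up to the next boundary.  The mask
                                 * arithmetic below assumes ALIGN_SIZE is of
                                 * the form 2^n - 1 (e.g. 127 for 128-byte
                                 * alignment), so an address of 0x...81 would
                                 * become 0x...100.
                                 */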
485                                 ba->ba_0_org = (void *) kmalloc
486                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
487                                 if (!ba->ba_0_org)
488                                         return -ENOMEM;
489                                 tmp = (u64) ba->ba_0_org;
490                                 tmp += ALIGN_SIZE;
491                                 tmp &= ~((u64) ALIGN_SIZE);
492                                 ba->ba_0 = (void *) tmp;
493
494                                 ba->ba_1_org = (void *) kmalloc
495                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
496                                 if (!ba->ba_1_org)
497                                         return -ENOMEM;
498                                 tmp = (u64) ba->ba_1_org;
499                                 tmp += ALIGN_SIZE;
500                                 tmp &= ~((u64) ALIGN_SIZE);
501                                 ba->ba_1 = (void *) tmp;
502                                 k++;
503                         }
504                 }
505         }
506 #endif
507
508         /* Allocation and initialization of Statistics block */
509         size = sizeof(StatInfo_t);
510         mac_control->stats_mem = pci_alloc_consistent
511             (nic->pdev, size, &mac_control->stats_mem_phy);
512
513         if (!mac_control->stats_mem) {
514                 /*
515                  * In case of failure, free_shared_mem() is called, which
516                  * should free any memory that was alloced till the
517                  * failure happened.
518                  */
519                 return -ENOMEM;
520         }
521         mac_control->stats_mem_sz = size;
522
523         tmp_v_addr = mac_control->stats_mem;
524         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
525         memset(tmp_v_addr, 0, size);
526         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
527                   (unsigned long long) tmp_p_addr);
528
529         return SUCCESS;
530 }
531
532 /**
533  * free_shared_mem - Free the allocated Memory
534  * @nic:  Device private variable.
535  * Description: This function frees all memory allocated by
536  * the init_shared_mem() function and returns it to the kernel.
537  */
538
539 static void free_shared_mem(struct s2io_nic *nic)
540 {
541         int i, j, blk_cnt, size;
542         void *tmp_v_addr;
543         dma_addr_t tmp_p_addr;
544         mac_info_t *mac_control;
545         struct config_param *config;
546         int lst_size, lst_per_page;
547
548
549         if (!nic)
550                 return;
551
552         mac_control = &nic->mac_control;
553         config = &nic->config;
554
555         lst_size = (sizeof(TxD_t) * config->max_txds);
556         lst_per_page = PAGE_SIZE / lst_size;
557
558         for (i = 0; i < config->tx_fifo_num; i++) {
559                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
560                                                 lst_per_page);
561                 for (j = 0; j < page_num; j++) {
562                         int mem_blks = (j * lst_per_page);
563                         if (!mac_control->fifos[i].list_info[mem_blks].
564                             list_virt_addr)
565                                 break;
566                         pci_free_consistent(nic->pdev, PAGE_SIZE,
567                                             mac_control->fifos[i].
568                                             list_info[mem_blks].
569                                             list_virt_addr,
570                                             mac_control->fifos[i].
571                                             list_info[mem_blks].
572                                             list_phy_addr);
573                 }
574                 kfree(mac_control->fifos[i].list_info);
575         }
576
577 #ifndef CONFIG_2BUFF_MODE
578         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
579 #else
580         size = SIZE_OF_BLOCK;
581 #endif
582         for (i = 0; i < config->rx_ring_num; i++) {
583                 blk_cnt = mac_control->rings[i].block_count;
584                 for (j = 0; j < blk_cnt; j++) {
585                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
586                                 block_virt_addr;
587                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
588                                 block_dma_addr;
589                         if (tmp_v_addr == NULL)
590                                 break;
591                         pci_free_consistent(nic->pdev, size,
592                                             tmp_v_addr, tmp_p_addr);
593                 }
594         }
595
596 #ifdef CONFIG_2BUFF_MODE
597         /* Freeing buffer storage addresses in 2BUFF mode. */
598         for (i = 0; i < config->rx_ring_num; i++) {
599                 blk_cnt =
600                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
601                 for (j = 0; j < blk_cnt; j++) {
602                         int k = 0;
603                         if (!mac_control->rings[i].ba[j])
604                                 continue;
605                         while (k != MAX_RXDS_PER_BLOCK) {
606                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
607                                 kfree(ba->ba_0_org);
608                                 kfree(ba->ba_1_org);
609                                 k++;
610                         }
611                         kfree(mac_control->rings[i].ba[j]);
612                 }
613                 if (mac_control->rings[i].ba)
614                         kfree(mac_control->rings[i].ba);
615         }
616 #endif
617
618         if (mac_control->stats_mem) {
619                 pci_free_consistent(nic->pdev,
620                                     mac_control->stats_mem_sz,
621                                     mac_control->stats_mem,
622                                     mac_control->stats_mem_phy);
623         }
624 }
625
626 /**
627  *  init_nic - Initialization of hardware
628  *  @nic: device private variable
629  *  Description: The function sequentially configures every block
630  *  of the H/W from their reset values.
631  *  Return Value:  SUCCESS on success and
632  *  '-1' on failure (endian settings incorrect).
633  */
634
635 static int init_nic(struct s2io_nic *nic)
636 {
637         XENA_dev_config_t __iomem *bar0 = nic->bar0;
638         struct net_device *dev = nic->dev;
639         register u64 val64 = 0;
640         void __iomem *add;
641         u32 time;
642         int i, j;
643         mac_info_t *mac_control;
644         struct config_param *config;
645         int mdio_cnt = 0, dtx_cnt = 0;
646         unsigned long long mem_share;
647         int mem_size;
648
649         mac_control = &nic->mac_control;
650         config = &nic->config;
651
652         /* Set the swapper control on the card */
653         if(s2io_set_swapper(nic)) {
654                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
655                 return -1;
656         }
657
658         /* Remove XGXS from reset state */
659         val64 = 0;
660         writeq(val64, &bar0->sw_reset);
661         msleep(500);
662         val64 = readq(&bar0->sw_reset);
663
664         /*  Enable Receiving broadcasts */
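        /*
         * mac_cfg appears to be a key-protected register: each 32-bit half
         * is written with writel(), and every half-write is preceded by
         * writing the unlock key 0x4C0D into rmac_cfg_key.  The same pattern
         * is repeated further down when RMAC pad stripping is disabled.
         */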
665         add = &bar0->mac_cfg;
666         val64 = readq(&bar0->mac_cfg);
667         val64 |= MAC_RMAC_BCAST_ENABLE;
668         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
669         writel((u32) val64, add);
670         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
671         writel((u32) (val64 >> 32), (add + 4));
672
673         /* Read registers in all blocks */
674         val64 = readq(&bar0->mac_int_mask);
675         val64 = readq(&bar0->mc_int_mask);
676         val64 = readq(&bar0->xgxs_int_mask);
677
678         /*  Set MTU */
679         val64 = dev->mtu;
680         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
681
682         /*
683          * Configuring the XAUI Interface of Xena.
684          * ***************************************
685          * To configure the Xena's XAUI, one has to write a series
686          * of 64 bit values into two registers in a particular
687          * sequence. Hence a macro 'SWITCH_SIGN' has been defined, which is
688          * placed in the array of configuration values
689          * (default_dtx_cfg & default_mdio_cfg) at appropriate places
690          * to switch writing from one register to another. We continue
691          * writing these values until we encounter the 'END_SIGN' macro.
692          * For example, after making a series of 21 writes into
693          * dtx_control register the 'SWITCH_SIGN' appears and hence we
694          * start writing into mdio_control until we encounter END_SIGN.
695          */
696         while (1) {
697               dtx_cfg:
698                 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
699                         if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
700                                 dtx_cnt++;
701                                 goto mdio_cfg;
702                         }
703                         SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
704                                           &bar0->dtx_control, UF);
705                         val64 = readq(&bar0->dtx_control);
706                         dtx_cnt++;
707                 }
708               mdio_cfg:
709                 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
710                         if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
711                                 mdio_cnt++;
712                                 goto dtx_cfg;
713                         }
714                         SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
715                                           &bar0->mdio_control, UF);
716                         val64 = readq(&bar0->mdio_control);
717                         mdio_cnt++;
718                 }
719                 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
720                     (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
721                         break;
722                 } else {
723                         goto dtx_cfg;
724                 }
725         }
726
727         /*  Tx DMA Initialization */
728         val64 = 0;
729         writeq(val64, &bar0->tx_fifo_partition_0);
730         writeq(val64, &bar0->tx_fifo_partition_1);
731         writeq(val64, &bar0->tx_fifo_partition_2);
732         writeq(val64, &bar0->tx_fifo_partition_3);
733
734
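        /*
         * Each tx_fifo_partition_N register carries the length and priority
         * fields for a pair of FIFOs (placed at bit offsets (i * 32) + 19 and
         * (i * 32) + 5 in vBIT() notation).  The "i++" adjustment in the loop
         * below makes the switch statement flush the final, possibly
         * half-filled, partition register when an odd number of FIFOs is
         * configured.
         */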
735         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
736                 val64 |=
737                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
738                          13) | vBIT(config->tx_cfg[i].fifo_priority,
739                                     ((i * 32) + 5), 3);
740
741                 if (i == (config->tx_fifo_num - 1)) {
742                         if (i % 2 == 0)
743                                 i++;
744                 }
745
746                 switch (i) {
747                 case 1:
748                         writeq(val64, &bar0->tx_fifo_partition_0);
749                         val64 = 0;
750                         break;
751                 case 3:
752                         writeq(val64, &bar0->tx_fifo_partition_1);
753                         val64 = 0;
754                         break;
755                 case 5:
756                         writeq(val64, &bar0->tx_fifo_partition_2);
757                         val64 = 0;
758                         break;
759                 case 7:
760                         writeq(val64, &bar0->tx_fifo_partition_3);
761                         break;
762                 }
763         }
764
765         /* Enable Tx FIFO partition 0. */
766         val64 = readq(&bar0->tx_fifo_partition_0);
767         val64 |= BIT(0);        /* To enable the FIFO partition. */
768         writeq(val64, &bar0->tx_fifo_partition_0);
769
770         /*
771          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
772          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
773          */
774         if (get_xena_rev_id(nic->pdev) < 4)
775                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
776
777         val64 = readq(&bar0->tx_fifo_partition_0);
778         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
779                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
780
781         /*
782          * Initialization of Tx_PA_CONFIG register to ignore packet
783          * integrity checking.
784          */
785         val64 = readq(&bar0->tx_pa_cfg);
786         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
787             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
788         writeq(val64, &bar0->tx_pa_cfg);
789
790         /* Rx DMA initialization. */
791         val64 = 0;
792         for (i = 0; i < config->rx_ring_num; i++) {
793                 val64 |=
794                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
795                          3);
796         }
797         writeq(val64, &bar0->rx_queue_priority);
798
799         /*
800          * Allocating equal share of memory to all the
801          * configured Rings.
802          */
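        /*
         * Worked example (illustrative): with rx_ring_num = 3, queue 0 gets
         * 64/3 + 64%3 = 22 shares while queues 1 and 2 get 64/3 = 21 each,
         * so the remainder of the division always lands on queue 0 and the
         * shares still sum to 64.
         */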
803         val64 = 0;
804         mem_size = 64;
805         for (i = 0; i < config->rx_ring_num; i++) {
806                 switch (i) {
807                 case 0:
808                         mem_share = (mem_size / config->rx_ring_num +
809                                      mem_size % config->rx_ring_num);
810                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
811                         continue;
812                 case 1:
813                         mem_share = (mem_size / config->rx_ring_num);
814                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
815                         continue;
816                 case 2:
817                         mem_share = (mem_size / config->rx_ring_num);
818                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
819                         continue;
820                 case 3:
821                         mem_share = (mem_size / config->rx_ring_num);
822                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
823                         continue;
824                 case 4:
825                         mem_share = (mem_size / config->rx_ring_num);
826                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
827                         continue;
828                 case 5:
829                         mem_share = (mem_size / config->rx_ring_num);
830                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
831                         continue;
832                 case 6:
833                         mem_share = (mem_size / config->rx_ring_num);
834                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
835                         continue;
836                 case 7:
837                         mem_share = (mem_size / config->rx_ring_num);
838                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
839                         continue;
840                 }
841         }
842         writeq(val64, &bar0->rx_queue_cfg);
843
844         /*
845          * Filling Tx round robin registers
846          * as per the number of FIFOs
847          */
848         switch (config->tx_fifo_num) {
849         case 1:
850                 val64 = 0x0000000000000000ULL;
851                 writeq(val64, &bar0->tx_w_round_robin_0);
852                 writeq(val64, &bar0->tx_w_round_robin_1);
853                 writeq(val64, &bar0->tx_w_round_robin_2);
854                 writeq(val64, &bar0->tx_w_round_robin_3);
855                 writeq(val64, &bar0->tx_w_round_robin_4);
856                 break;
857         case 2:
858                 val64 = 0x0000010000010000ULL;
859                 writeq(val64, &bar0->tx_w_round_robin_0);
860                 val64 = 0x0100000100000100ULL;
861                 writeq(val64, &bar0->tx_w_round_robin_1);
862                 val64 = 0x0001000001000001ULL;
863                 writeq(val64, &bar0->tx_w_round_robin_2);
864                 val64 = 0x0000010000010000ULL;
865                 writeq(val64, &bar0->tx_w_round_robin_3);
866                 val64 = 0x0100000000000000ULL;
867                 writeq(val64, &bar0->tx_w_round_robin_4);
868                 break;
869         case 3:
870                 val64 = 0x0001000102000001ULL;
871                 writeq(val64, &bar0->tx_w_round_robin_0);
872                 val64 = 0x0001020000010001ULL;
873                 writeq(val64, &bar0->tx_w_round_robin_1);
874                 val64 = 0x0200000100010200ULL;
875                 writeq(val64, &bar0->tx_w_round_robin_2);
876                 val64 = 0x0001000102000001ULL;
877                 writeq(val64, &bar0->tx_w_round_robin_3);
878                 val64 = 0x0001020000000000ULL;
879                 writeq(val64, &bar0->tx_w_round_robin_4);
880                 break;
881         case 4:
882                 val64 = 0x0001020300010200ULL;
883                 writeq(val64, &bar0->tx_w_round_robin_0);
884                 val64 = 0x0100000102030001ULL;
885                 writeq(val64, &bar0->tx_w_round_robin_1);
886                 val64 = 0x0200010000010203ULL;
887                 writeq(val64, &bar0->tx_w_round_robin_2);
888                 val64 = 0x0001020001000001ULL;
889                 writeq(val64, &bar0->tx_w_round_robin_3);
890                 val64 = 0x0203000100000000ULL;
891                 writeq(val64, &bar0->tx_w_round_robin_4);
892                 break;
893         case 5:
894                 val64 = 0x0001000203000102ULL;
895                 writeq(val64, &bar0->tx_w_round_robin_0);
896                 val64 = 0x0001020001030004ULL;
897                 writeq(val64, &bar0->tx_w_round_robin_1);
898                 val64 = 0x0001000203000102ULL;
899                 writeq(val64, &bar0->tx_w_round_robin_2);
900                 val64 = 0x0001020001030004ULL;
901                 writeq(val64, &bar0->tx_w_round_robin_3);
902                 val64 = 0x0001000000000000ULL;
903                 writeq(val64, &bar0->tx_w_round_robin_4);
904                 break;
905         case 6:
906                 val64 = 0x0001020304000102ULL;
907                 writeq(val64, &bar0->tx_w_round_robin_0);
908                 val64 = 0x0304050001020001ULL;
909                 writeq(val64, &bar0->tx_w_round_robin_1);
910                 val64 = 0x0203000100000102ULL;
911                 writeq(val64, &bar0->tx_w_round_robin_2);
912                 val64 = 0x0304000102030405ULL;
913                 writeq(val64, &bar0->tx_w_round_robin_3);
914                 val64 = 0x0001000200000000ULL;
915                 writeq(val64, &bar0->tx_w_round_robin_4);
916                 break;
917         case 7:
918                 val64 = 0x0001020001020300ULL;
919                 writeq(val64, &bar0->tx_w_round_robin_0);
920                 val64 = 0x0102030400010203ULL;
921                 writeq(val64, &bar0->tx_w_round_robin_1);
922                 val64 = 0x0405060001020001ULL;
923                 writeq(val64, &bar0->tx_w_round_robin_2);
924                 val64 = 0x0304050000010200ULL;
925                 writeq(val64, &bar0->tx_w_round_robin_3);
926                 val64 = 0x0102030000000000ULL;
927                 writeq(val64, &bar0->tx_w_round_robin_4);
928                 break;
929         case 8:
930                 val64 = 0x0001020300040105ULL;
931                 writeq(val64, &bar0->tx_w_round_robin_0);
932                 val64 = 0x0200030106000204ULL;
933                 writeq(val64, &bar0->tx_w_round_robin_1);
934                 val64 = 0x0103000502010007ULL;
935                 writeq(val64, &bar0->tx_w_round_robin_2);
936                 val64 = 0x0304010002060500ULL;
937                 writeq(val64, &bar0->tx_w_round_robin_3);
938                 val64 = 0x0103020400000000ULL;
939                 writeq(val64, &bar0->tx_w_round_robin_4);
940                 break;
941         }
942
943         /* Filling the Rx round robin registers as per the
944          * number of Rings and steering based on QoS.
945          */
946         switch (config->rx_ring_num) {
947         case 1:
948                 val64 = 0x8080808080808080ULL;
949                 writeq(val64, &bar0->rts_qos_steering);
950                 break;
951         case 2:
952                 val64 = 0x0000010000010000ULL;
953                 writeq(val64, &bar0->rx_w_round_robin_0);
954                 val64 = 0x0100000100000100ULL;
955                 writeq(val64, &bar0->rx_w_round_robin_1);
956                 val64 = 0x0001000001000001ULL;
957                 writeq(val64, &bar0->rx_w_round_robin_2);
958                 val64 = 0x0000010000010000ULL;
959                 writeq(val64, &bar0->rx_w_round_robin_3);
960                 val64 = 0x0100000000000000ULL;
961                 writeq(val64, &bar0->rx_w_round_robin_4);
962
963                 val64 = 0x8080808040404040ULL;
964                 writeq(val64, &bar0->rts_qos_steering);
965                 break;
966         case 3:
967                 val64 = 0x0001000102000001ULL;
968                 writeq(val64, &bar0->rx_w_round_robin_0);
969                 val64 = 0x0001020000010001ULL;
970                 writeq(val64, &bar0->rx_w_round_robin_1);
971                 val64 = 0x0200000100010200ULL;
972                 writeq(val64, &bar0->rx_w_round_robin_2);
973                 val64 = 0x0001000102000001ULL;
974                 writeq(val64, &bar0->rx_w_round_robin_3);
975                 val64 = 0x0001020000000000ULL;
976                 writeq(val64, &bar0->rx_w_round_robin_4);
977
978                 val64 = 0x8080804040402020ULL;
979                 writeq(val64, &bar0->rts_qos_steering);
980                 break;
981         case 4:
982                 val64 = 0x0001020300010200ULL;
983                 writeq(val64, &bar0->rx_w_round_robin_0);
984                 val64 = 0x0100000102030001ULL;
985                 writeq(val64, &bar0->rx_w_round_robin_1);
986                 val64 = 0x0200010000010203ULL;
987                 writeq(val64, &bar0->rx_w_round_robin_2);
988                 val64 = 0x0001020001000001ULL;  
989                 writeq(val64, &bar0->rx_w_round_robin_3);
990                 val64 = 0x0203000100000000ULL;
991                 writeq(val64, &bar0->rx_w_round_robin_4);
992
993                 val64 = 0x8080404020201010ULL;
994                 writeq(val64, &bar0->rts_qos_steering);
995                 break;
996         case 5:
997                 val64 = 0x0001000203000102ULL;
998                 writeq(val64, &bar0->rx_w_round_robin_0);
999                 val64 = 0x0001020001030004ULL;
1000                 writeq(val64, &bar0->rx_w_round_robin_1);
1001                 val64 = 0x0001000203000102ULL;
1002                 writeq(val64, &bar0->rx_w_round_robin_2);
1003                 val64 = 0x0001020001030004ULL;
1004                 writeq(val64, &bar0->rx_w_round_robin_3);
1005                 val64 = 0x0001000000000000ULL;
1006                 writeq(val64, &bar0->rx_w_round_robin_4);
1007
1008                 val64 = 0x8080404020201008ULL;
1009                 writeq(val64, &bar0->rts_qos_steering);
1010                 break;
1011         case 6:
1012                 val64 = 0x0001020304000102ULL;
1013                 writeq(val64, &bar0->rx_w_round_robin_0);
1014                 val64 = 0x0304050001020001ULL;
1015                 writeq(val64, &bar0->rx_w_round_robin_1);
1016                 val64 = 0x0203000100000102ULL;
1017                 writeq(val64, &bar0->rx_w_round_robin_2);
1018                 val64 = 0x0304000102030405ULL;
1019                 writeq(val64, &bar0->rx_w_round_robin_3);
1020                 val64 = 0x0001000200000000ULL;
1021                 writeq(val64, &bar0->rx_w_round_robin_4);
1022
1023                 val64 = 0x8080404020100804ULL;
1024                 writeq(val64, &bar0->rts_qos_steering);
1025                 break;
1026         case 7:
1027                 val64 = 0x0001020001020300ULL;
1028                 writeq(val64, &bar0->rx_w_round_robin_0);
1029                 val64 = 0x0102030400010203ULL;
1030                 writeq(val64, &bar0->rx_w_round_robin_1);
1031                 val64 = 0x0405060001020001ULL;
1032                 writeq(val64, &bar0->rx_w_round_robin_2);
1033                 val64 = 0x0304050000010200ULL;
1034                 writeq(val64, &bar0->rx_w_round_robin_3);
1035                 val64 = 0x0102030000000000ULL;
1036                 writeq(val64, &bar0->rx_w_round_robin_4);
1037
1038                 val64 = 0x8080402010080402ULL;
1039                 writeq(val64, &bar0->rts_qos_steering);
1040                 break;
1041         case 8:
1042                 val64 = 0x0001020300040105ULL;
1043                 writeq(val64, &bar0->rx_w_round_robin_0);
1044                 val64 = 0x0200030106000204ULL;
1045                 writeq(val64, &bar0->rx_w_round_robin_1);
1046                 val64 = 0x0103000502010007ULL;
1047                 writeq(val64, &bar0->rx_w_round_robin_2);
1048                 val64 = 0x0304010002060500ULL;
1049                 writeq(val64, &bar0->rx_w_round_robin_3);
1050                 val64 = 0x0103020400000000ULL;
1051                 writeq(val64, &bar0->rx_w_round_robin_4);
1052
1053                 val64 = 0x8040201008040201ULL;
1054                 writeq(val64, &bar0->rts_qos_steering);
1055                 break;
1056         }
1057
1058         /* UDP Fix */
1059         val64 = 0;
1060         for (i = 0; i < 8; i++)
1061                 writeq(val64, &bar0->rts_frm_len_n[i]);
1062
1063         /* Set the default rts frame length for the rings configured */
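        /* (The extra 22 bytes on top of dev->mtu presumably cover the 14-byte
         * Ethernet header, a 4-byte VLAN tag and the 4-byte FCS; this is an
         * assumption, the breakdown is not stated in this file.)
         */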
1064         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1065         for (i = 0 ; i < config->rx_ring_num ; i++)
1066                 writeq(val64, &bar0->rts_frm_len_n[i]);
1067
1068         /* Set the frame length for the configured rings as
1069          * desired by the user.
1070          */
1071         for (i = 0; i < config->rx_ring_num; i++) {
1072                 /* If rts_frm_len[i] == 0, it is assumed that the user has not
1073                  * specified frame length steering.
1074                  * If the user provides a frame length, program
1075                  * the rts_frm_len register with that value; otherwise
1076                  * leave it as it is.
1077                  */
1078                 if (rts_frm_len[i] != 0) {
1079                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1080                                 &bar0->rts_frm_len_n[i]);
1081                 }
1082         }
1083
1084         /* Program statistics memory */
1085         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1086         val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
1087                 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
1088         writeq(val64, &bar0->stat_cfg);
1089
1090         /*
1091          * Initializing the sampling rate for the device to calculate the
1092          * bandwidth utilization.
1093          */
1094         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1095             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1096         writeq(val64, &bar0->mac_link_util);
1097
1098
1099         /*
1100          * Initializing the Transmit and Receive Traffic Interrupt
1101          * Scheme.
1102          */
1103         /*
1104          * TTI Initialization. Default Tx timer gets us about
1105          * 250 interrupts per sec. Continuous interrupts are enabled
1106          * by default.
1107          */
1108         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1109             TTI_DATA1_MEM_TX_URNG_A(0xA) |
1110             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1111             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1112         if (use_continuous_tx_intrs)
1113                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1114         writeq(val64, &bar0->tti_data1_mem);
1115
1116         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1117             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1118             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1119         writeq(val64, &bar0->tti_data2_mem);
1120
1121         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1122         writeq(val64, &bar0->tti_command_mem);
1123
1124         /*
1125          * Once the operation completes, the Strobe bit of the command
1126          * register will be reset. We poll for this particular condition.
1127          * We wait for a maximum of 500ms for the operation to complete;
1128          * if it is not complete by then, we return an error.
1129          */
1130         time = 0;
1131         while (TRUE) {
1132                 val64 = readq(&bar0->tti_command_mem);
1133                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1134                         break;
1135                 }
1136                 if (time > 10) {
1137                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1138                                   dev->name);
1139                         return -1;
1140                 }
1141                 msleep(50);
1142                 time++;
1143         }
1144
1145         /* RTI Initialization */
1146         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1147             RTI_DATA1_MEM_RX_URNG_A(0xA) |
1148             RTI_DATA1_MEM_RX_URNG_B(0x10) |
1149             RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1150
1151         writeq(val64, &bar0->rti_data1_mem);
1152
1153         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1154             RTI_DATA2_MEM_RX_UFC_B(0x2) |
1155             RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1156         writeq(val64, &bar0->rti_data2_mem);
1157
1158         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1159         writeq(val64, &bar0->rti_command_mem);
1160
1161         /*
1162          * Once the operation completes, the Strobe bit of the
1163          * command register will be reset. We poll for this
1164          * particular condition. We wait for a maximum of 500ms
1165          * for the operation to complete; if it is not complete
1166          * by then, we return an error.
1167          */
1168         time = 0;
1169         while (TRUE) {
1170                 val64 = readq(&bar0->rti_command_mem);
1171                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1172                         break;
1173                 }
1174                 if (time > 10) {
1175                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1176                                   dev->name);
1177                         return -1;
1178                 }
1179                 time++;
1180                 msleep(50);
1181         }
1182
1183         /*
1184          * Initializing proper pause threshold values for all
1185          * the 8 queues on the Rx side.
1186          */
1187         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1188         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1189
1190         /* Disable RMAC PAD STRIPPING */
1191         add = (void *) &bar0->mac_cfg;
1192         val64 = readq(&bar0->mac_cfg);
1193         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1194         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1195         writel((u32) (val64), add);
1196         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1197         writel((u32) (val64 >> 32), (add + 4));
1198         val64 = readq(&bar0->mac_cfg);
1199
1200         /*
1201          * Set the time value to be inserted in the pause frame
1202          * generated by xena.
1203          */
1204         val64 = readq(&bar0->rmac_pause_cfg);
1205         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1206         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1207         writeq(val64, &bar0->rmac_pause_cfg);
1208
1209         /*
1210          * Set the threshold limit for generating the pause frame.
1211          * If the amount of data in any queue exceeds the ratio
1212          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1213          * a pause frame is generated.
1214          */
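        /*
         * Each queue occupies a 16-bit field of the form (0xFF00 | threshold).
         * With the default mc_pause_threshold_q0q3 = 187 (0xBB), every field
         * becomes 0xFFBB, i.e. the 0xffbbffbbffbbffbb value programmed as the
         * initial pause threshold earlier in this function.
         */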
1215         val64 = 0;
1216         for (i = 0; i < 4; i++) {
1217                 val64 |=
1218                     (((u64) 0xFF00 | nic->mac_control.
1219                       mc_pause_threshold_q0q3)
1220                      << (i * 2 * 8));
1221         }
1222         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1223
1224         val64 = 0;
1225         for (i = 0; i < 4; i++) {
1226                 val64 |=
1227                     (((u64) 0xFF00 | nic->mac_control.
1228                       mc_pause_threshold_q4q7)
1229                      << (i * 2 * 8));
1230         }
1231         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1232
1233         /*
1234          * TxDMA will stop issuing read requests if the number of read splits
1235          * exceeds the limit set by shared_splits.
1236          */
1237         val64 = readq(&bar0->pic_control);
1238         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1239         writeq(val64, &bar0->pic_control);
1240
1241         return SUCCESS;
1242 }
1243
1244 /**
1245  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1246  *  @nic: device private variable,
1247  *  @mask: A mask indicating which Intr block must be modified, and
1248  *  @flag: A flag indicating whether to enable or disable the Intrs.
1249  *  Description: This function will either disable or enable the interrupts
1250  *  depending on the flag argument. The mask argument can be used to
1251  *  enable/disable any Intr block.
1252  *  Return Value: NONE.
1253  */
1254
1255 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1256 {
1257         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1258         register u64 val64 = 0, temp64 = 0;
1259
1260         /*  Top level interrupt classification */
1261         /*  PIC Interrupts */
1262         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1263                 /*  Enable PIC Intrs in the general intr mask register */
1264                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1265                 if (flag == ENABLE_INTRS) {
1266                         temp64 = readq(&bar0->general_int_mask);
1267                         temp64 &= ~((u64) val64);
1268                         writeq(temp64, &bar0->general_int_mask);
1269                         /*
1270                          * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1271                          * interrupts for now.
1272                          * TODO
1273                          */
1274                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1275                         /*
1276                          * No MSI Support is available presently, so TTI and
1277                          * RTI interrupts are also disabled.
1278                          */
1279                 } else if (flag == DISABLE_INTRS) {
1280                         /*
1281                          * Disable PIC Intrs in the general
1282                          * intr mask register
1283                          */
1284                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1285                         temp64 = readq(&bar0->general_int_mask);
1286                         val64 |= temp64;
1287                         writeq(val64, &bar0->general_int_mask);
1288                 }
1289         }
1290
1291         /*  DMA Interrupts */
1292         /*  Enabling/Disabling Tx DMA interrupts */
1293         if (mask & TX_DMA_INTR) {
1294                 /* Enable TxDMA Intrs in the general intr mask register */
1295                 val64 = TXDMA_INT_M;
1296                 if (flag == ENABLE_INTRS) {
1297                         temp64 = readq(&bar0->general_int_mask);
1298                         temp64 &= ~((u64) val64);
1299                         writeq(temp64, &bar0->general_int_mask);
1300                         /*
1301                          * Keep all interrupts other than PFC interrupt
1302                          * and PCC interrupt disabled in DMA level.
1303                          */
1304                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1305                                                       TXDMA_PCC_INT_M);
1306                         writeq(val64, &bar0->txdma_int_mask);
1307                         /*
1308                          * Enable only the MISC error 1 interrupt in PFC block
1309                          */
1310                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1311                         writeq(val64, &bar0->pfc_err_mask);
1312                         /*
1313                          * Enable only the FB_ECC error interrupt in PCC block
1314                          */
1315                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1316                         writeq(val64, &bar0->pcc_err_mask);
1317                 } else if (flag == DISABLE_INTRS) {
1318                         /*
1319                          * Disable TxDMA Intrs in the general intr mask
1320                          * register
1321                          */
1322                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1323                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1324                         temp64 = readq(&bar0->general_int_mask);
1325                         val64 |= temp64;
1326                         writeq(val64, &bar0->general_int_mask);
1327                 }
1328         }
1329
1330         /*  Enabling/Disabling Rx DMA interrupts */
1331         if (mask & RX_DMA_INTR) {
1332                 /*  Enable RxDMA Intrs in the general intr mask register */
1333                 val64 = RXDMA_INT_M;
1334                 if (flag == ENABLE_INTRS) {
1335                         temp64 = readq(&bar0->general_int_mask);
1336                         temp64 &= ~((u64) val64);
1337                         writeq(temp64, &bar0->general_int_mask);
1338                         /*
1339                          * All RxDMA block interrupts are disabled for now
1340                          * TODO
1341                          */
1342                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1343                 } else if (flag == DISABLE_INTRS) {
1344                         /*
1345                          * Disable RxDMA Intrs in the general intr mask
1346                          * register
1347                          */
1348                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1349                         temp64 = readq(&bar0->general_int_mask);
1350                         val64 |= temp64;
1351                         writeq(val64, &bar0->general_int_mask);
1352                 }
1353         }
1354
1355         /*  MAC Interrupts */
1356         /*  Enabling/Disabling MAC interrupts */
1357         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1358                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1359                 if (flag == ENABLE_INTRS) {
1360                         temp64 = readq(&bar0->general_int_mask);
1361                         temp64 &= ~((u64) val64);
1362                         writeq(temp64, &bar0->general_int_mask);
1363                         /*
1364                          * All MAC block error interrupts are disabled for now
1365                          * except the link status change interrupt.
1366                          * TODO
1367                          */
1368                         val64 = MAC_INT_STATUS_RMAC_INT;
1369                         temp64 = readq(&bar0->mac_int_mask);
1370                         temp64 &= ~((u64) val64);
1371                         writeq(temp64, &bar0->mac_int_mask);
1372
1373                         val64 = readq(&bar0->mac_rmac_err_mask);
1374                         val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1375                         writeq(val64, &bar0->mac_rmac_err_mask);
1376                 } else if (flag == DISABLE_INTRS) {
1377                         /*
1378                          * Disable MAC Intrs in the general intr mask register
1379                          */
1380                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1381                         writeq(DISABLE_ALL_INTRS,
1382                                &bar0->mac_rmac_err_mask);
1383
1384                         temp64 = readq(&bar0->general_int_mask);
1385                         val64 |= temp64;
1386                         writeq(val64, &bar0->general_int_mask);
1387                 }
1388         }
1389
1390         /*  XGXS Interrupts */
1391         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1392                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1393                 if (flag == ENABLE_INTRS) {
1394                         temp64 = readq(&bar0->general_int_mask);
1395                         temp64 &= ~((u64) val64);
1396                         writeq(temp64, &bar0->general_int_mask);
1397                         /*
1398                          * All XGXS block error interrupts are disabled for now
1399                          * TODO
1400                          */
1401                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1402                 } else if (flag == DISABLE_INTRS) {
1403                         /*
1404                          * Disable XGXS Intrs in the general intr mask register
1405                          */
1406                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1407                         temp64 = readq(&bar0->general_int_mask);
1408                         val64 |= temp64;
1409                         writeq(val64, &bar0->general_int_mask);
1410                 }
1411         }
1412
1413         /*  Memory Controller(MC) interrupts */
1414         if (mask & MC_INTR) {
1415                 val64 = MC_INT_M;
1416                 if (flag == ENABLE_INTRS) {
1417                         temp64 = readq(&bar0->general_int_mask);
1418                         temp64 &= ~((u64) val64);
1419                         writeq(temp64, &bar0->general_int_mask);
1420                         /*
1421                          * Enable all MC Intrs.
1422                          */
1423                         writeq(0x0, &bar0->mc_int_mask);
1424                         writeq(0x0, &bar0->mc_err_mask);
1425                 } else if (flag == DISABLE_INTRS) {
1426                         /*
1427                          * Disable MC Intrs in the general intr mask register
1428                          */
1429                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1430                         temp64 = readq(&bar0->general_int_mask);
1431                         val64 |= temp64;
1432                         writeq(val64, &bar0->general_int_mask);
1433                 }
1434         }
1435
1436
1437         /*  Tx traffic interrupts */
1438         if (mask & TX_TRAFFIC_INTR) {
1439                 val64 = TXTRAFFIC_INT_M;
1440                 if (flag == ENABLE_INTRS) {
1441                         temp64 = readq(&bar0->general_int_mask);
1442                         temp64 &= ~((u64) val64);
1443                         writeq(temp64, &bar0->general_int_mask);
1444                         /*
1445                          * Enable all the Tx side interrupts.
1446                          * Writing 0 enables all 64 Tx interrupt levels.
1447                          */
1448                         writeq(0x0, &bar0->tx_traffic_mask);
1449                 } else if (flag == DISABLE_INTRS) {
1450                         /*
1451                          * Disable Tx Traffic Intrs in the general intr mask
1452                          * register.
1453                          */
1454                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1455                         temp64 = readq(&bar0->general_int_mask);
1456                         val64 |= temp64;
1457                         writeq(val64, &bar0->general_int_mask);
1458                 }
1459         }
1460
1461         /*  Rx traffic interrupts */
1462         if (mask & RX_TRAFFIC_INTR) {
1463                 val64 = RXTRAFFIC_INT_M;
1464                 if (flag == ENABLE_INTRS) {
1465                         temp64 = readq(&bar0->general_int_mask);
1466                         temp64 &= ~((u64) val64);
1467                         writeq(temp64, &bar0->general_int_mask);
1468                         /* writing 0 Enables all 8 RX interrupt levels */
1469                         writeq(0x0, &bar0->rx_traffic_mask);
1470                 } else if (flag == DISABLE_INTRS) {
1471                         /*
1472                          * Disable Rx Traffic Intrs in the general intr mask
1473                          * register.
1474                          */
1475                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1476                         temp64 = readq(&bar0->general_int_mask);
1477                         val64 |= temp64;
1478                         writeq(val64, &bar0->general_int_mask);
1479                 }
1480         }
1481 }
1482
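/*
 * check_prc_pcc_state - helper used by verify_xena_quiescence().
 * Examines the RMAC PCC idle and RC PRC quiescent bits of the adapter
 * status value. The test differs depending on whether the adapter enable
 * bit was ever written (flag) and on the Xena revision: revisions below 4
 * are checked against ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE rather than
 * ADAPTER_STATUS_RMAC_PCC_IDLE.
 * Returns 1 if the PCC/PRC state is acceptable, 0 otherwise.
 */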
1483 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1484 {
1485         int ret = 0;
1486
1487         if (flag == FALSE) {
1488                 if (rev_id >= 4) {
1489                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1490                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1491                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1492                                 ret = 1;
1493                         }
1494                 } else {
1495                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1496                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1497                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1498                                 ret = 1;
1499                         }
1500                 }
1501         } else {
1502                 if (rev_id >= 4) {
1503                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1504                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1505                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1506                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1507                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1508                                 ret = 1;
1509                         }
1510                 } else {
1511                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1512                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1513                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1514                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1515                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1516                                 ret = 1;
1517                         }
1518                 }
1519         }
1520
1521         return ret;
1522 }
1523 /**
1524  *  verify_xena_quiescence - Checks whether the H/W is ready
1525  *  @sp : private member of the device structure (s2io_nic pointer).
1526  *  @val64 : Value read from the adapter status register.
1527  *  @flag : indicates if the adapter enable bit was ever written before.
1528  *  Description: Returns whether the H/W is ready to go or not. The
1529  *  comparison differs depending on whether the adapter enable bit has
1530  *  already been written, which the calling function indicates through
1531  *  the flag argument.
1532  *  Return: 1 if Xena is quiescent
1533  *          0 if Xena is not quiescent
1534  */
1535
1536 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1537 {
1538         int ret = 0;
1539         u64 tmp64 = ~((u64) val64);
1540         int rev_id = get_xena_rev_id(sp->pdev);
1541
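        /*
         * Complementing the status value lets a single mask test confirm
         * that every READY/QUIESCENT/PLL-lock bit listed below is set: any
         * bit that is 0 in val64 shows up as 1 in tmp64 and fails the check.
         */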
1542         if (!
1543             (tmp64 &
1544              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1545               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1546               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1547               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1548               ADAPTER_STATUS_P_PLL_LOCK))) {
1549                 ret = check_prc_pcc_state(val64, flag, rev_id);
1550         }
1551
1552         return ret;
1553 }
1554
1555 /**
1556  * fix_mac_address -  Fix for MAC addr problem on Alpha platforms
1557  * @sp: Pointer to device specific structure
1558  * Description :
1559  * New procedure to clear MAC address reading problems on Alpha platforms
1560  *
1561  */
1562
1563 void fix_mac_address(nic_t * sp)
1564 {
1565         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1566         u64 val64;
1567         int i = 0;
1568
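        /*
         * Replay the write sequence from the fix_mac[] table into
         * gpio_control, reading the register back after each write, until
         * the END_SIGN terminator is reached.
         */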
1569         while (fix_mac[i] != END_SIGN) {
1570                 writeq(fix_mac[i++], &bar0->gpio_control);
1571                 udelay(10);
1572                 val64 = readq(&bar0->gpio_control);
1573         }
1574 }
1575
1576 /**
1577  *  start_nic - Turns the device on
1578  *  @nic : device private variable.
1579  *  Description:
1580  *  This function actually turns the device on. Before this function is
1581  *  called, all registers are configured from their reset states
1582  *  and shared memory is allocated, but the NIC is still quiescent. On
1583  *  calling this function, the device interrupts are cleared and the NIC is
1584  *  literally switched on by writing into the adapter control register.
1585  *  Return Value:
1586  *  SUCCESS on success and -1 on failure.
1587  */
1588
1589 static int start_nic(struct s2io_nic *nic)
1590 {
1591         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1592         struct net_device *dev = nic->dev;
1593         register u64 val64 = 0;
1594         u16 interruptible;
1595         u16 subid, i;
1596         mac_info_t *mac_control;
1597         struct config_param *config;
1598
1599         mac_control = &nic->mac_control;
1600         config = &nic->config;
1601
1602         /*  PRC Initialization and configuration */
1603         for (i = 0; i < config->rx_ring_num; i++) {
1604                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1605                        &bar0->prc_rxd0_n[i]);
1606
1607                 val64 = readq(&bar0->prc_ctrl_n[i]);
1608 #ifndef CONFIG_2BUFF_MODE
1609                 val64 |= PRC_CTRL_RC_ENABLED;
1610 #else
1611                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1612 #endif
1613                 writeq(val64, &bar0->prc_ctrl_n[i]);
1614         }
1615
1616 #ifdef CONFIG_2BUFF_MODE
1617         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1618         val64 = readq(&bar0->rx_pa_cfg);
1619         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1620         writeq(val64, &bar0->rx_pa_cfg);
1621 #endif
1622
1623         /*
1624          * Enabling MC-RLDRAM. After enabling it, we wait
1625          * for around 100ms, which is approximately the time required
1626          * for the device to be ready for operation.
1627          */
1628         val64 = readq(&bar0->mc_rldram_mrs);
1629         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1630         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1631         val64 = readq(&bar0->mc_rldram_mrs);
1632
1633         msleep(100);    /* Delay by around 100 ms. */
1634
1635         /* Enabling ECC Protection. */
1636         val64 = readq(&bar0->adapter_control);
1637         val64 &= ~ADAPTER_ECC_EN;
1638         writeq(val64, &bar0->adapter_control);
1639
1640         /*
1641          * Clearing any possible Link state change interrupts that
1642          * could have popped up just before Enabling the card.
1643          */
1644         val64 = readq(&bar0->mac_rmac_err_reg);
1645         if (val64)
1646                 writeq(val64, &bar0->mac_rmac_err_reg);
1647
1648         /*
1649          * Verify if the device is ready to be enabled, if so enable
1650          * it.
1651          */
1652         val64 = readq(&bar0->adapter_status);
1653         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1654                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1655                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1656                           (unsigned long long) val64);
1657                 return FAILURE;
1658         }
1659
1660         /*  Enable select interrupts */
1661         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1662             RX_MAC_INTR | MC_INTR;
1663         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1664
1665         /*
1666          * With some switches, link might be already up at this point.
1667          * Because of this weird behavior, when we enable laser,
1668          * we may not get link. We need to handle this. We cannot
1669          * figure out which switch is misbehaving. So we are forced to
1670          * make a global change.
1671          */
1672
1673         /* Enabling Laser. */
1674         val64 = readq(&bar0->adapter_control);
1675         val64 |= ADAPTER_EOI_TX_ON;
1676         writeq(val64, &bar0->adapter_control);
1677
1678         /* SXE-002: Initialize link and activity LED */
1679         subid = nic->pdev->subsystem_device;
1680         if ((subid & 0xFF) >= 0x07) {
1681                 val64 = readq(&bar0->gpio_control);
1682                 val64 |= 0x0000800000000000ULL;
1683                 writeq(val64, &bar0->gpio_control);
1684                 val64 = 0x0411040400000000ULL;
1685                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1686         }
1687
1688         /*
1689          * Don't see link state interrupts on certain switches, so
1690          * directly scheduling a link state task from here.
1691          */
1692         schedule_work(&nic->set_link_task);
1693
1694         return SUCCESS;
1695 }
1696
1697 /**
1698  *  free_tx_buffers - Free all queued Tx buffers
1699  *  @nic : device private variable.
1700  *  Description:
1701  *  Free all queued Tx buffers.
1702  *  Return Value: void
1703 */
1704
1705 static void free_tx_buffers(struct s2io_nic *nic)
1706 {
1707         struct net_device *dev = nic->dev;
1708         struct sk_buff *skb;
1709         TxD_t *txdp;
1710         int i, j;
1711         mac_info_t *mac_control;
1712         struct config_param *config;
1713         int cnt = 0;
1714
1715         mac_control = &nic->mac_control;
1716         config = &nic->config;
1717
1718         for (i = 0; i < config->tx_fifo_num; i++) {
1719                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1720                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1721                             list_virt_addr;
1722                         skb =
1723                             (struct sk_buff *) ((unsigned long) txdp->
1724                                                 Host_Control);
1725                         if (skb == NULL) {
1726                                 memset(txdp, 0, sizeof(TxD_t));
1727                                 continue;
1728                         }
1729                         dev_kfree_skb(skb);
1730                         memset(txdp, 0, sizeof(TxD_t));
1731                         cnt++;
1732                 }
1733                 DBG_PRINT(INTR_DBG,
1734                           "%s:forcibly freeing %d skbs on FIFO%d\n",
1735                           dev->name, cnt, i);
1736                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1737                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1738         }
1739 }
1740
1741 /**
1742  *   stop_nic -  To stop the nic
1743  *   @nic : device private variable.
1744  *   Description:
1745  *   This function does exactly the opposite of what the start_nic()
1746  *   function does. This function is called to stop the device.
1747  *   Return Value:
1748  *   void.
1749  */
1750
1751 static void stop_nic(struct s2io_nic *nic)
1752 {
1753         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1754         register u64 val64 = 0;
1755         u16 interruptible, i;
1756         mac_info_t *mac_control;
1757         struct config_param *config;
1758
1759         mac_control = &nic->mac_control;
1760         config = &nic->config;
1761
1762         /*  Disable all interrupts */
1763         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1764             RX_MAC_INTR | MC_INTR;
1765         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1766
1767         /*  Disable PRCs */
1768         for (i = 0; i < config->rx_ring_num; i++) {
1769                 val64 = readq(&bar0->prc_ctrl_n[i]);
1770                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1771                 writeq(val64, &bar0->prc_ctrl_n[i]);
1772         }
1773 }
1774
1775 /**
1776  *  fill_rx_buffers - Allocates the Rx side skbs
1777  *  @nic:  device private variable
1778  *  @ring_no: ring number
1779  *  Description:
1780  *  The function allocates Rx side skbs and puts the physical
1781  *  address of these buffers into the RxD buffer pointers, so that the NIC
1782  *  can DMA the received frame into these locations.
1783  *  The NIC supports 3 receive modes, viz
1784  *  1. single buffer,
1785  *  2. three buffer and
1786  *  3. five buffer modes.
1787  *  Each mode defines how many fragments the received frame will be split
1788  *  up into by the NIC. The frame is split into L3 header, L4 Header,
1789  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1790  *  is split into 3 fragments. As of now only single buffer mode is
1791  *  supported.
1792  *   Return Value:
1793  *  SUCCESS on success or an appropriate -ve value on failure.
1794  */
1795
1796 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1797 {
1798         struct net_device *dev = nic->dev;
1799         struct sk_buff *skb;
1800         RxD_t *rxdp;
1801         int off, off1, size, block_no, block_no1;
1802         int offset, offset1;
1803         u32 alloc_tab = 0;
1804         u32 alloc_cnt;
1805         mac_info_t *mac_control;
1806         struct config_param *config;
1807 #ifdef CONFIG_2BUFF_MODE
1808         RxD_t *rxdpnext;
1809         int nextblk;
1810         u64 tmp;
1811         buffAdd_t *ba;
1812         dma_addr_t rxdpphys;
1813 #endif
1814 #ifndef CONFIG_S2IO_NAPI
1815         unsigned long flags;
1816 #endif
1817
1818         mac_control = &nic->mac_control;
1819         config = &nic->config;
1820         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1821             atomic_read(&nic->rx_bufs_left[ring_no]);
1822         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1823             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1824
1825         while (alloc_tab < alloc_cnt) {
1826                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1827                     block_index;
1828                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1829                     block_index;
1830                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1831                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1832 #ifndef CONFIG_2BUFF_MODE
1833                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1834                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1835 #else
1836                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1837                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1838 #endif
1839
1840                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1841                     block_virt_addr + off;
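                /*
                 * If the put pointer has caught up with the get pointer and
                 * this descriptor still holds an skb, the ring is full and
                 * no more buffers can be posted for now.
                 */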
1842                 if ((offset == offset1) && (rxdp->Host_Control)) {
1843                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1844                         DBG_PRINT(INTR_DBG, " info equated\n");
1845                         goto end;
1846                 }
1847 #ifndef CONFIG_2BUFF_MODE
1848                 if (rxdp->Control_1 == END_OF_BLOCK) {
1849                         mac_control->rings[ring_no].rx_curr_put_info.
1850                             block_index++;
1851                         mac_control->rings[ring_no].rx_curr_put_info.
1852                             block_index %= mac_control->rings[ring_no].block_count;
1853                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
1854                                 block_index;
1855                         off++;
1856                         off %= (MAX_RXDS_PER_BLOCK + 1);
1857                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1858                             off;
1859                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1860                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1861                                   dev->name, rxdp);
1862                 }
1863 #ifndef CONFIG_S2IO_NAPI
1864                 spin_lock_irqsave(&nic->put_lock, flags);
1865                 mac_control->rings[ring_no].put_pos =
1866                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1867                 spin_unlock_irqrestore(&nic->put_lock, flags);
1868 #endif
1869 #else
1870                 if (rxdp->Host_Control == END_OF_BLOCK) {
1871                         mac_control->rings[ring_no].rx_curr_put_info.
1872                             block_index++;
1873                         mac_control->rings[ring_no].rx_curr_put_info.block_index
1874                             %= mac_control->rings[ring_no].block_count;
1875                         block_no = mac_control->rings[ring_no].rx_curr_put_info
1876                             .block_index;
1877                         off = 0;
1878                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1879                                   dev->name, block_no,
1880                                   (unsigned long long) rxdp->Control_1);
1881                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1882                             off;
1883                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1884                             block_virt_addr;
1885                 }
1886 #ifndef CONFIG_S2IO_NAPI
1887                 spin_lock_irqsave(&nic->put_lock, flags);
1888                 mac_control->rings[ring_no].put_pos = (block_no *
1889                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
1890                 spin_unlock_irqrestore(&nic->put_lock, flags);
1891 #endif
1892 #endif
1893
1894 #ifndef CONFIG_2BUFF_MODE
1895                 if (rxdp->Control_1 & RXD_OWN_XENA)
1896 #else
1897                 if (rxdp->Control_2 & BIT(0))
1898 #endif
1899                 {
1900                         mac_control->rings[ring_no].rx_curr_put_info.
1901                             offset = off;
1902                         goto end;
1903                 }
1904 #ifdef  CONFIG_2BUFF_MODE
1905                 /*
1906                  * RxDs spanning cache lines will be replenished only
1907                  * if the succeeding RxD is also owned by the host. It
1908                  * will always be the ((8*i)+3) and ((8*i)+6)
1909                  * descriptors for the 48 byte descriptor. The offending
1910                  * descriptor is of course the 3rd descriptor.
1911                  */
1912                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1913                     block_dma_addr + (off * sizeof(RxD_t));
1914                 if (((u64) (rxdpphys)) % 128 > 80) {
1915                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1916                             block_virt_addr + (off + 1);
1917                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
1918                                 nextblk = (block_no + 1) %
1919                                     (mac_control->rings[ring_no].block_count);
1920                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
1921                                     [nextblk].block_virt_addr;
1922                         }
1923                         if (rxdpnext->Control_2 & BIT(0))
1924                                 goto end;
1925                 }
1926 #endif
1927
1928 #ifndef CONFIG_2BUFF_MODE
1929                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1930 #else
1931                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1932 #endif
1933                 if (!skb) {
1934                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1935                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1936                         return -ENOMEM;
1937                 }
1938 #ifndef CONFIG_2BUFF_MODE
1939                 skb_reserve(skb, NET_IP_ALIGN);
1940                 memset(rxdp, 0, sizeof(RxD_t));
1941                 rxdp->Buffer0_ptr = pci_map_single
1942                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1943                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1944                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1945                 rxdp->Host_Control = (unsigned long) (skb);
1946                 rxdp->Control_1 |= RXD_OWN_XENA;
1947                 off++;
1948                 off %= (MAX_RXDS_PER_BLOCK + 1);
1949                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1950 #else
1951                 ba = &mac_control->rings[ring_no].ba[block_no][off];
1952                 skb_reserve(skb, BUF0_LEN);
1953                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1954                 if (tmp)
1955                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1956
1957                 memset(rxdp, 0, sizeof(RxD_t));
1958                 rxdp->Buffer2_ptr = pci_map_single
1959                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1960                      PCI_DMA_FROMDEVICE);
1961                 rxdp->Buffer0_ptr =
1962                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1963                                    PCI_DMA_FROMDEVICE);
1964                 rxdp->Buffer1_ptr =
1965                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1966                                    PCI_DMA_FROMDEVICE);
1967
1968                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1969                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1970                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1971                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
1972                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1973                 rxdp->Control_1 |= RXD_OWN_XENA;
1974                 off++;
1975                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1976 #endif
1977                 rxdp->Control_2 |= SET_RXD_MARKER;
1978
1979                 atomic_inc(&nic->rx_bufs_left[ring_no]);
1980                 alloc_tab++;
1981         }
1982
1983       end:
1984         return SUCCESS;
1985 }
1986
1987 /**
1988  *  free_rx_buffers - Frees all Rx buffers
1989  *  @sp: device private variable.
1990  *  Description:
1991  *  This function will free all Rx buffers allocated by host.
1992  *  Return Value:
1993  *  NONE.
1994  */
1995
1996 static void free_rx_buffers(struct s2io_nic *sp)
1997 {
1998         struct net_device *dev = sp->dev;
1999         int i, j, blk = 0, off, buf_cnt = 0;
2000         RxD_t *rxdp;
2001         struct sk_buff *skb;
2002         mac_info_t *mac_control;
2003         struct config_param *config;
2004 #ifdef CONFIG_2BUFF_MODE
2005         buffAdd_t *ba;
2006 #endif
2007
2008         mac_control = &sp->mac_control;
2009         config = &sp->config;
2010
2011         for (i = 0; i < config->rx_ring_num; i++) {
2012                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2013                         off = j % (MAX_RXDS_PER_BLOCK + 1);
2014                         rxdp = mac_control->rings[i].rx_blocks[blk].
2015                                 block_virt_addr + off;
2016
2017 #ifndef CONFIG_2BUFF_MODE
2018                         if (rxdp->Control_1 == END_OF_BLOCK) {
2019                                 rxdp =
2020                                     (RxD_t *) ((unsigned long) rxdp->
2021                                                Control_2);
2022                                 j++;
2023                                 blk++;
2024                         }
2025 #else
2026                         if (rxdp->Host_Control == END_OF_BLOCK) {
2027                                 blk++;
2028                                 continue;
2029                         }
2030 #endif
2031
2032                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2033                                 memset(rxdp, 0, sizeof(RxD_t));
2034                                 continue;
2035                         }
2036
2037                         skb =
2038                             (struct sk_buff *) ((unsigned long) rxdp->
2039                                                 Host_Control);
2040                         if (skb) {
2041 #ifndef CONFIG_2BUFF_MODE
2042                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2043                                                  rxdp->Buffer0_ptr,
2044                                                  dev->mtu +
2045                                                  HEADER_ETHERNET_II_802_3_SIZE
2046                                                  + HEADER_802_2_SIZE +
2047                                                  HEADER_SNAP_SIZE,
2048                                                  PCI_DMA_FROMDEVICE);
2049 #else
2050                                 ba = &mac_control->rings[i].ba[blk][off];
2051                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2052                                                  rxdp->Buffer0_ptr,
2053                                                  BUF0_LEN,
2054                                                  PCI_DMA_FROMDEVICE);
2055                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2056                                                  rxdp->Buffer1_ptr,
2057                                                  BUF1_LEN,
2058                                                  PCI_DMA_FROMDEVICE);
2059                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2060                                                  rxdp->Buffer2_ptr,
2061                                                  dev->mtu + BUF0_LEN + 4,
2062                                                  PCI_DMA_FROMDEVICE);
2063 #endif
2064                                 dev_kfree_skb(skb);
2065                                 atomic_dec(&sp->rx_bufs_left[i]);
2066                                 buf_cnt++;
2067                         }
2068                         memset(rxdp, 0, sizeof(RxD_t));
2069                 }
2070                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2071                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2072                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2073                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2074                 atomic_set(&sp->rx_bufs_left[i], 0);
2075                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2076                           dev->name, buf_cnt, i);
2077         }
2078 }
2079
2080 /**
2081  * s2io_poll - Rx interrupt handler for NAPI support
2082  * @dev : pointer to the device structure.
2083  * @budget : The number of packets that were budgeted to be processed
2084  * during one pass through the 'Poll' function.
2085  * Description:
2086  * Comes into picture only if NAPI support has been incorporated. It does
2087  * the same thing that rx_intr_handler does, but not in an interrupt
2088  * context. Also, it will process only a given number of packets.
2089  * Return value:
2090  * 0 on success and 1 if there are no Rx packets to be processed.
2091  */
2092
2093 #if defined(CONFIG_S2IO_NAPI)
2094 static int s2io_poll(struct net_device *dev, int *budget)
2095 {
2096         nic_t *nic = dev->priv;
2097         int pkt_cnt = 0, org_pkts_to_process;
2098         mac_info_t *mac_control;
2099         struct config_param *config;
2100         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2101         u64 val64;
2102         int i;
2103
2104         mac_control = &nic->mac_control;
2105         config = &nic->config;
2106
2107         nic->pkts_to_process = *budget;
2108         if (nic->pkts_to_process > dev->quota)
2109                 nic->pkts_to_process = dev->quota;
2110         org_pkts_to_process = nic->pkts_to_process;
2111
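        /*
         * rx_traffic_int is an R1 register; reading it and writing the same
         * value back acknowledges the pending Rx traffic interrupts.
         */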
2112         val64 = readq(&bar0->rx_traffic_int);
2113         writeq(val64, &bar0->rx_traffic_int);
2114
2115         for (i = 0; i < config->rx_ring_num; i++) {
2116                 rx_intr_handler(&mac_control->rings[i]);
2117                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2118                 if (!nic->pkts_to_process) {
2119                         /* Quota for the current iteration has been met */
2120                         goto no_rx;
2121                 }
2122         }
2123         if (!pkt_cnt)
2124                 pkt_cnt = 1;
2125
2126         dev->quota -= pkt_cnt;
2127         *budget -= pkt_cnt;
2128         netif_rx_complete(dev);
2129
2130         for (i = 0; i < config->rx_ring_num; i++) {
2131                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2132                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2133                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2134                         break;
2135                 }
2136         }
2137         /* Re enable the Rx interrupts. */
2138         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2139         return 0;
2140
2141 no_rx:
2142         dev->quota -= pkt_cnt;
2143         *budget -= pkt_cnt;
2144
2145         for (i = 0; i < config->rx_ring_num; i++) {
2146                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2147                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2148                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2149                         break;
2150                 }
2151         }
2152         return 1;
2153 }
2154 #endif
2155
2156 /**
2157  *  rx_intr_handler - Rx interrupt handler
2158  *  @ring_data: per-ring control structure of the ring to be serviced.
2159  *  Description:
2160  *  If the interrupt is because of a received frame or if the
2161  *  receive ring contains fresh, as yet unprocessed frames, this function
2162  *  is called. It picks out the RxD at which the last Rx processing had
2163  *  stopped and sends the skb to the OSM's Rx handler and then increments
2164  *  the offset.
2165  *  Return Value:
2166  *  NONE.
2167  */
2168 static void rx_intr_handler(ring_info_t *ring_data)
2169 {
2170         nic_t *nic = ring_data->nic;
2171         struct net_device *dev = (struct net_device *) nic->dev;
2172         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2173         int get_block, get_offset, put_block, put_offset, ring_bufs;
2174         rx_curr_get_info_t get_info, put_info;
2175         RxD_t *rxdp;
2176         struct sk_buff *skb;
2177 #ifndef CONFIG_S2IO_NAPI
2178         int pkt_cnt = 0;
2179 #endif
2180         register u64 val64;
2181
2182         /*
2183          * rx_traffic_int reg is an R1 register, hence we read and write
2184          * back the same value in the register to clear it
2185          */
2186         val64 = readq(&bar0->rx_traffic_int);
2187         writeq(val64, &bar0->rx_traffic_int);
2188
2189         get_info = ring_data->rx_curr_get_info;
2190         get_block = get_info.block_index;
2191         put_info = ring_data->rx_curr_put_info;
2192         put_block = put_info.block_index;
2193         ring_bufs = get_info.ring_len+1;
2194         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2195                     get_info.offset;
2196         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2197                 get_info.offset;
2198 #ifndef CONFIG_S2IO_NAPI
2199         spin_lock(&nic->put_lock);
2200         put_offset = ring_data->put_pos;
2201         spin_unlock(&nic->put_lock);
2202 #else
2203         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2204                 put_info.offset;
2205 #endif
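        /*
         * Walk the ring from where the last Rx processing stopped, handing
         * each host-owned, marker-valid RxD to the upper layer until we
         * catch up with the put position.
         */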
2206         while (RXD_IS_UP2DT(rxdp) &&
2207                (((get_offset + 1) % ring_bufs) != put_offset)) {
2208                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2209                 if (skb == NULL) {
2210                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2211                                   dev->name);
2212                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2213                         return;
2214                 }
2215 #ifndef CONFIG_2BUFF_MODE
2216                 pci_unmap_single(nic->pdev, (dma_addr_t)
2217                                  rxdp->Buffer0_ptr,
2218                                  dev->mtu +
2219                                  HEADER_ETHERNET_II_802_3_SIZE +
2220                                  HEADER_802_2_SIZE +
2221                                  HEADER_SNAP_SIZE,
2222                                  PCI_DMA_FROMDEVICE);
2223 #else
2224                 pci_unmap_single(nic->pdev, (dma_addr_t)
2225                                  rxdp->Buffer0_ptr,
2226                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2227                 pci_unmap_single(nic->pdev, (dma_addr_t)
2228                                  rxdp->Buffer1_ptr,
2229                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
2230                 pci_unmap_single(nic->pdev, (dma_addr_t)
2231                                  rxdp->Buffer2_ptr,
2232                                  dev->mtu + BUF0_LEN + 4,
2233                                  PCI_DMA_FROMDEVICE);
2234 #endif
2235                 rx_osm_handler(ring_data, rxdp);
2236                 get_info.offset++;
2237                 ring_data->rx_curr_get_info.offset =
2238                     get_info.offset;
2239                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2240                     get_info.offset;
2241                 if (get_info.offset &&
2242                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2243                         get_info.offset = 0;
2244                         ring_data->rx_curr_get_info.offset
2245                             = get_info.offset;
2246                         get_block++;
2247                         get_block %= ring_data->block_count;
2248                         ring_data->rx_curr_get_info.block_index
2249                             = get_block;
2250                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2251                 }
2252
2253                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2254                             get_info.offset;
2255 #ifdef CONFIG_S2IO_NAPI
2256                 nic->pkts_to_process -= 1;
2257                 if (!nic->pkts_to_process)
2258                         break;
2259 #else
2260                 pkt_cnt++;
2261                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2262                         break;
2263 #endif
2264         }
2265 }
2266
2267 /**
2268  *  tx_intr_handler - Transmit interrupt handler
2269  *  @fifo_data : per-FIFO control structure of the FIFO being serviced.
2270  *  Description:
2271  *  If an interrupt was raised to indicate DMA completion of a
2272  *  Tx packet, this function is called. It identifies the last TxD
2273  *  whose buffer was freed and frees all skbs whose data have already
2274  *  been DMA'ed into the NIC's internal memory.
2275  *  Return Value:
2276  *  NONE
2277  */
2278
2279 static void tx_intr_handler(fifo_info_t *fifo_data)
2280 {
2281         nic_t *nic = fifo_data->nic;
2282         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2283         struct net_device *dev = (struct net_device *) nic->dev;
2284         tx_curr_get_info_t get_info, put_info;
2285         struct sk_buff *skb;
2286         TxD_t *txdlp;
2287         u16 j, frg_cnt;
2288         register u64 val64 = 0;
2289
2290         /*
2291          * tx_traffic_int reg is an R1 register, hence we read and write
2292          * back the same value in the register to clear it
2293          */
2294         val64 = readq(&bar0->tx_traffic_int);
2295         writeq(val64, &bar0->tx_traffic_int);
2296
2297         get_info = fifo_data->tx_curr_get_info;
2298         put_info = fifo_data->tx_curr_put_info;
2299         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2300             list_virt_addr;
2301         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2302                (get_info.offset != put_info.offset) &&
2303                (txdlp->Host_Control)) {
2304                 /* Check for TxD errors */
2305                 if (txdlp->Control_1 & TXD_T_CODE) {
2306                         unsigned long long err;
2307                         err = txdlp->Control_1 & TXD_T_CODE;
2308                         DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2309                                   err);
2310                 }
2311
2312                 skb = (struct sk_buff *) ((unsigned long)
2313                                 txdlp->Host_Control);
2314                 if (skb == NULL) {
2315                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2316                         __FUNCTION__);
2317                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2318                         return;
2319                 }
2320
2321                 frg_cnt = skb_shinfo(skb)->nr_frags;
2322                 nic->tx_pkt_count++;
2323
2324                 pci_unmap_single(nic->pdev, (dma_addr_t)
2325                                  txdlp->Buffer_Pointer,
2326                                  skb->len - skb->data_len,
2327                                  PCI_DMA_TODEVICE);
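                /* Unmap any page fragments that were mapped at transmit time. */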
2328                 if (frg_cnt) {
2329                         TxD_t *temp;
2330                         temp = txdlp;
2331                         txdlp++;
2332                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2333                                 skb_frag_t *frag =
2334                                     &skb_shinfo(skb)->frags[j];
2335                                 pci_unmap_page(nic->pdev,
2336                                                (dma_addr_t)
2337                                                txdlp->
2338                                                Buffer_Pointer,
2339                                                frag->size,
2340                                                PCI_DMA_TODEVICE);
2341                         }
2342                         txdlp = temp;
2343                 }
2344                 memset(txdlp, 0,
2345                        (sizeof(TxD_t) * fifo_data->max_txds));
2346
2347                 /* Updating the statistics block */
2348                 nic->stats.tx_packets++;
2349                 nic->stats.tx_bytes += skb->len;
2350                 dev_kfree_skb_irq(skb);
2351
2352                 get_info.offset++;
2353                 get_info.offset %= get_info.fifo_len + 1;
2354                 txdlp = (TxD_t *) fifo_data->list_info
2355                     [get_info.offset].list_virt_addr;
2356                 fifo_data->tx_curr_get_info.offset =
2357                     get_info.offset;
2358         }
2359
2360         spin_lock(&nic->tx_lock);
2361         if (netif_queue_stopped(dev))
2362                 netif_wake_queue(dev);
2363         spin_unlock(&nic->tx_lock);
2364 }
2365
2366 /**
2367  *  alarm_intr_handler - Alarm Interrupt handler
2368  *  @nic: device private variable
2369  *  Description: If the interrupt was neither because of an Rx packet nor
2370  *  a Tx completion, this function is called. If the interrupt indicates
2371  *  a loss of link, the OSM link status handler is invoked. For any other
2372  *  alarm interrupt, the block that raised the interrupt is reported
2373  *  and a H/W reset is issued.
2374  *  Return Value:
2375  *  NONE
2376 */
2377
2378 static void alarm_intr_handler(struct s2io_nic *nic)
2379 {
2380         struct net_device *dev = (struct net_device *) nic->dev;
2381         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2382         register u64 val64 = 0, err_reg = 0;
2383
2384         /* Handling link status change error Intr */
2385         err_reg = readq(&bar0->mac_rmac_err_reg);
2386         writeq(err_reg, &bar0->mac_rmac_err_reg);
2387         if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2388                 schedule_work(&nic->set_link_task);
2389         }
2390
2391         /* Handling Ecc errors */
2392         val64 = readq(&bar0->mc_err_reg);
2393         writeq(val64, &bar0->mc_err_reg);
2394         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2395                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2396                         DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2397                                   dev->name);
2398                         DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2399                         netif_stop_queue(dev);
2400                         schedule_work(&nic->rst_timer_task);
2401                 } else {
2402                         /* Device can recover from Single ECC errors */
2403                 }
2404         }
2405
2406         /* In case of a serious error, the device will be Reset. */
2407         val64 = readq(&bar0->serr_source);
2408         if (val64 & SERR_SOURCE_ANY) {
2409                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2410                 DBG_PRINT(ERR_DBG, "serious error!!\n");
2411                 netif_stop_queue(dev);
2412                 schedule_work(&nic->rst_timer_task);
2413         }
2414
2415         /*
2416          * Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
2417          * error occurs, the adapter will be recycled by disabling the
2418          * adapter enable bit and enabling it again after the device
2419          * becomes quiescent.
2420          */
2421         val64 = readq(&bar0->pcc_err_reg);
2422         writeq(val64, &bar0->pcc_err_reg);
2423         if (val64 & PCC_FB_ECC_DB_ERR) {
2424                 u64 ac = readq(&bar0->adapter_control);
2425                 ac &= ~(ADAPTER_CNTL_EN);
2426                 writeq(ac, &bar0->adapter_control);
2427                 ac = readq(&bar0->adapter_control);
2428                 schedule_work(&nic->set_link_task);
2429         }
2430
2431         /* Other type of interrupts are not being handled now,  TODO */
2432 }
2433
2434 /**
2435  *  wait_for_cmd_complete - waits for a command to complete.
2436  *  @sp : private member of the device structure, which is a pointer to the
2437  *  s2io_nic structure.
2438  *  Description: Function that waits for a command issued through the RMAC
2439  *  ADDR/DATA registers to be completed and returns either success or
2440  *  error depending on whether the command completed or not.
2441  *  Return value:
2442  *   SUCCESS on success and FAILURE on failure.
2443  */
2444
2445 int wait_for_cmd_complete(nic_t * sp)
2446 {
2447         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2448         int ret = FAILURE, cnt = 0;
2449         u64 val64;
2450
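        /* Poll the strobe bit every 50ms, giving up after roughly half a second. */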
2451         while (TRUE) {
2452                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2453                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2454                         ret = SUCCESS;
2455                         break;
2456                 }
2457                 msleep(50);
2458                 if (cnt++ > 10)
2459                         break;
2460         }
2461
2462         return ret;
2463 }
2464
2465 /**
2466  *  s2io_reset - Resets the card.
2467  *  @sp : private member of the device structure.
2468  *  Description: Function to Reset the card. This function then also
2469  *  restores the previously saved PCI configuration space registers as
2470  *  the card reset also resets the configuration space.
2471  *  Return value:
2472  *  void.
2473  */
2474
2475 void s2io_reset(nic_t * sp)
2476 {
2477         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2478         u64 val64;
2479         u16 subid, pci_cmd;
2480
2481         val64 = SW_RESET_ALL;
2482         writeq(val64, &bar0->sw_reset);
2483
2484         /*
2485          * At this stage, if the PCI write is indeed completed, the
2486          * card is reset and so is the PCI Config space of the device.
2487          * So a read cannot be issued at this stage on any of the
2488          * registers to ensure the write into "sw_reset" register
2489          * has gone through.
2490          * Question: Is there any system call that will explicitly force
2491          * all the write commands still pending on the bus to be pushed
2492          * through?
2493          * As of now I am just giving a 250ms delay and hoping that the
2494          * PCI write to the sw_reset register is done by this time.
2495          */
2496         msleep(250);
2497
2498         /* Restore the PCI state saved during initialization. */
2499         pci_restore_state(sp->pdev);
2500
2501         s2io_init_pci(sp);
2502
2503         msleep(250);
2504
2505         /* Set swapper to enable I/O register access */
2506         s2io_set_swapper(sp);
2507
2508         /* Clear certain PCI/PCI-X fields after reset */
2509         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2510         pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2511         pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2512
2513         val64 = readq(&bar0->txpic_int_reg);
2514         val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2515         writeq(val64, &bar0->txpic_int_reg);
2516
2517         /* Clearing PCIX Ecc status register */
2518         pci_write_config_dword(sp->pdev, 0x68, 0);
2519
2520         /* Reset device statistics maintained by OS */
2521         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2522
2523         /* SXE-002: Configure link and activity LED to turn it off */
2524         subid = sp->pdev->subsystem_device;
2525         if ((subid & 0xFF) >= 0x07) {
2526                 val64 = readq(&bar0->gpio_control);
2527                 val64 |= 0x0000800000000000ULL;
2528                 writeq(val64, &bar0->gpio_control);
2529                 val64 = 0x0411040400000000ULL;
2530                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2531         }
2532
2533         sp->device_enabled_once = FALSE;
2534 }
2535
2536 /**
2537  * s2io_set_swapper - to set the swapper control on the card
2538  *  @sp : private member of the device structure,
2539  *  pointer to the s2io_nic structure.
2540  *  Description: Function to set the swapper control on the card
2541  *  correctly depending on the 'endianness' of the system.
2542  *  Return value:
2543  *  SUCCESS on success and FAILURE on failure.
2544  */
2545
2546 int s2io_set_swapper(nic_t * sp)
2547 {
2548         struct net_device *dev = sp->dev;
2549         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2550         u64 val64, valt, valr;
2551
2552         /*
2553          * Set proper endian settings and verify the same by reading
2554          * the PIF Feed-back register.
2555          */
2556
2557         val64 = readq(&bar0->pif_rd_swapper_fb);
2558         if (val64 != 0x0123456789ABCDEFULL) {
2559                 int i = 0;
2560                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2561                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2562                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2563                                 0};                     /* FE=0, SE=0 */
2564
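                /*
                 * Try each swapper setting in turn until the PIF feed-back
                 * register reads back the expected constant.
                 */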
2565                 while(i<4) {
2566                         writeq(value[i], &bar0->swapper_ctrl);
2567                         val64 = readq(&bar0->pif_rd_swapper_fb);
2568                         if (val64 == 0x0123456789ABCDEFULL)
2569                                 break;
2570                         i++;
2571                 }
2572                 if (i == 4) {
2573                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2574                                 dev->name);
2575                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2576                                 (unsigned long long) val64);
2577                         return FAILURE;
2578                 }
2579                 valr = value[i];
2580         } else {
2581                 valr = readq(&bar0->swapper_ctrl);
2582         }
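        /*
         * The pif_rd_swapper_fb register reads back the fixed pattern
         * 0x0123456789ABCDEF only when the current swapper setting matches
         * the host byte order, so the loop above simply tries the four
         * flip-enable/swap-enable (FE/SE) combinations until the pattern
         * is read back correctly.
         */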
2583
2584         valt = 0x0123456789ABCDEFULL;
2585         writeq(valt, &bar0->xmsi_address);
2586         val64 = readq(&bar0->xmsi_address);
2587
2588         if(val64 != valt) {
2589                 int i = 0;
2590                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2591                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2592                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2593                                 0};                     /* FE=0, SE=0 */
2594
2595                 while(i<4) {
2596                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2597                         writeq(valt, &bar0->xmsi_address);
2598                         val64 = readq(&bar0->xmsi_address);
2599                         if(val64 == valt)
2600                                 break;
2601                         i++;
2602                 }
2603                 if(i == 4) {
2604                         unsigned long long x = val64;
2605                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2606                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2607                         return FAILURE;
2608                 }
2609         }
2610         val64 = readq(&bar0->swapper_ctrl);
2611         val64 &= 0xFFFF000000000000ULL;
2612
2613 #ifdef  __BIG_ENDIAN
2614         /*
2615          * The device is set to a big endian format by default, so a
2616          * big endian driver need not set anything.
2617          */
2618         val64 |= (SWAPPER_CTRL_TXP_FE |
2619                  SWAPPER_CTRL_TXP_SE |
2620                  SWAPPER_CTRL_TXD_R_FE |
2621                  SWAPPER_CTRL_TXD_W_FE |
2622                  SWAPPER_CTRL_TXF_R_FE |
2623                  SWAPPER_CTRL_RXD_R_FE |
2624                  SWAPPER_CTRL_RXD_W_FE |
2625                  SWAPPER_CTRL_RXF_W_FE |
2626                  SWAPPER_CTRL_XMSI_FE |
2627                  SWAPPER_CTRL_XMSI_SE |
2628                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2629         writeq(val64, &bar0->swapper_ctrl);
2630 #else
2631         /*
2632          * Initially we enable all bits to make the card accessible
2633          * to the driver, then we selectively enable only those bits
2634          * that we want to set.
2635          */
2636         val64 |= (SWAPPER_CTRL_TXP_FE |
2637                  SWAPPER_CTRL_TXP_SE |
2638                  SWAPPER_CTRL_TXD_R_FE |
2639                  SWAPPER_CTRL_TXD_R_SE |
2640                  SWAPPER_CTRL_TXD_W_FE |
2641                  SWAPPER_CTRL_TXD_W_SE |
2642                  SWAPPER_CTRL_TXF_R_FE |
2643                  SWAPPER_CTRL_RXD_R_FE |
2644                  SWAPPER_CTRL_RXD_R_SE |
2645                  SWAPPER_CTRL_RXD_W_FE |
2646                  SWAPPER_CTRL_RXD_W_SE |
2647                  SWAPPER_CTRL_RXF_W_FE |
2648                  SWAPPER_CTRL_XMSI_FE |
2649                  SWAPPER_CTRL_XMSI_SE |
2650                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2651         writeq(val64, &bar0->swapper_ctrl);
2652 #endif
2653         val64 = readq(&bar0->swapper_ctrl);
2654
2655         /*
2656          * Verifying if endian settings are accurate by reading a
2657          * feedback register.
2658          */
2659         val64 = readq(&bar0->pif_rd_swapper_fb);
2660         if (val64 != 0x0123456789ABCDEFULL) {
2661                 /* Endian settings are still incorrect; needs another look. */
2662                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2663                           dev->name);
2664                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2665                           (unsigned long long) val64);
2666                 return FAILURE;
2667         }
2668
2669         return SUCCESS;
2670 }
2671
2672 /* ********************************************************* *
2673  * Functions defined below concern the OS part of the driver *
2674  * ********************************************************* */
2675
2676 /**
2677  *  s2io_open - open entry point of the driver
2678  *  @dev : pointer to the device structure.
2679  *  Description:
2680  *  This function is the open entry point of the driver. It mainly calls a
2681  *  function to allocate Rx buffers and inserts them into the buffer
2682  *  descriptors and then enables the Rx part of the NIC.
2683  *  Return value:
2684  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2685  *   file on failure.
2686  */
2687
2688 int s2io_open(struct net_device *dev)
2689 {
2690         nic_t *sp = dev->priv;
2691         int err = 0;
2692
2693         /*
2694          * Make sure you have link off by default every time
2695          * Nic is initialized
2696          */
2697         netif_carrier_off(dev);
2698         sp->last_link_state = LINK_DOWN;
2699
2700         /* Initialize H/W and enable interrupts */
2701         if (s2io_card_up(sp)) {
2702                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2703                           dev->name);
2704                 err = -ENODEV;
2705                 goto hw_init_failed;
2706         }
2707
2708         /* After proper initialization of H/W, register ISR */
2709         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2710                           sp->name, dev);
2711         if (err) {
2712                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2713                           dev->name);
2714                 goto isr_registration_failed;
2715         }
2716
2717         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2718                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2719                 err = -ENODEV;
2720                 goto setting_mac_address_failed;
2721         }
2722
2723         netif_start_queue(dev);
2724         return 0;
2725
2726 setting_mac_address_failed:
2727         free_irq(sp->pdev->irq, dev);
2728 isr_registration_failed:
2729         s2io_reset(sp);
2730 hw_init_failed:
2731         return err;
2732 }
2733
2734 /**
2735  *  s2io_close -close entry point of the driver
2736  *  @dev : device pointer.
2737  *  Description:
2738  *  This is the stop entry point of the driver. It needs to undo exactly
2739  *  whatever was done by the open entry point,thus it's usually referred to
2740  *  whatever was done by the open entry point, thus it's usually referred to
2741  *  as the close function. Among other things, this function mainly stops the
2742  *  Return value:
2743  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2744  *  file on failure.
2745  */
2746
2747 int s2io_close(struct net_device *dev)
2748 {
2749         nic_t *sp = dev->priv;
2750         flush_scheduled_work();
2751         netif_stop_queue(dev);
2752         /* Reset card, kill tasklet and free Tx and Rx buffers. */
2753         s2io_card_down(sp);
2754
2755         free_irq(sp->pdev->irq, dev);
2756         sp->device_close_flag = TRUE;   /* Device is shut down. */
2757         return 0;
2758 }
2759
2760 /**
2761  *  s2io_xmit - Tx entry point of the driver
2762  *  @skb : the socket buffer containing the Tx data.
2763  *  @dev : device pointer.
2764  *  Description :
2765  *  This function is the Tx entry point of the driver. S2IO NIC supports
2766  *  certain protocol assist features on the Tx side, namely CSO, S/G, LSO.
2767  *  NOTE: when the device cannot queue the packet, only the trans_start
2768  *  variable will not be updated.
2769  *  Return value:
2770  *  0 on success & 1 on failure.
2771  */
2772
2773 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2774 {
2775         nic_t *sp = dev->priv;
2776         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2777         register u64 val64;
2778         TxD_t *txdp;
2779         TxFIFO_element_t __iomem *tx_fifo;
2780         unsigned long flags;
2781 #ifdef NETIF_F_TSO
2782         int mss;
2783 #endif
2784         mac_info_t *mac_control;
2785         struct config_param *config;
2786         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2787
2788         mac_control = &sp->mac_control;
2789         config = &sp->config;
2790
2791         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2792         spin_lock_irqsave(&sp->tx_lock, flags);
2793         if (atomic_read(&sp->card_state) == CARD_DOWN) {
2794                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2795                           dev->name);
2796                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2797                 dev_kfree_skb(skb);
2798                 return 0;
2799         }
2800
2801         queue = 0;
2802
2803         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2804         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2805         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2806                 list_virt_addr;
2807
2808         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2809         /* Avoid "put" pointer going beyond "get" pointer */
2810         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2811                 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2812                 netif_stop_queue(dev);
2813                 dev_kfree_skb(skb);
2814                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2815                 return 0;
2816         }
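        /*
         * Illustration of the wrap check above (values illustrative only):
         * with queue_len = 9, put_off = 7 and get_off = 8,
         * ((put_off + 1) % queue_len) == get_off, i.e. writing one more
         * descriptor would make the put pointer catch up with the get
         * pointer, so the packet is dropped and the queue is stopped until
         * Tx completions free some descriptors.
         */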
2817 #ifdef NETIF_F_TSO
2818         mss = skb_shinfo(skb)->tso_size;
2819         if (mss) {
2820                 txdp->Control_1 |= TXD_TCP_LSO_EN;
2821                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2822         }
2823 #endif
2824
2825         frg_cnt = skb_shinfo(skb)->nr_frags;
2826         frg_len = skb->len - skb->data_len;
2827
2828         txdp->Buffer_Pointer = pci_map_single
2829             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2830         txdp->Host_Control = (unsigned long) skb;
2831         if (skb->ip_summed == CHECKSUM_HW) {
2832                 txdp->Control_2 |=
2833                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2834                      TXD_TX_CKO_UDP_EN);
2835         }
2836
2837         txdp->Control_2 |= config->tx_intr_type;
2838
2839         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2840                             TXD_GATHER_CODE_FIRST);
2841         txdp->Control_1 |= TXD_LIST_OWN_XENA;
2842
2843         /* For fragmented SKB. */
2844         for (i = 0; i < frg_cnt; i++) {
2845                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2846                 txdp++;
2847                 txdp->Buffer_Pointer = (u64) pci_map_page
2848                     (sp->pdev, frag->page, frag->page_offset,
2849                      frag->size, PCI_DMA_TODEVICE);
2850                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2851         }
2852         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2853
2854         tx_fifo = mac_control->tx_FIFO_start[queue];
2855         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2856         writeq(val64, &tx_fifo->TxDL_Pointer);
2857
2858         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2859                  TX_FIFO_LAST_LIST);
2860
2861 #ifdef NETIF_F_TSO
2862         if (mss)
2863                 val64 |= TX_FIFO_SPECIAL_FUNC;
2864 #endif
2865         writeq(val64, &tx_fifo->List_Control);
2866
2867         /* Perform a PCI read to flush previous writes */
2868         val64 = readq(&bar0->general_int_status);
2869
2870         put_off++;
2871         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2872         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2873
2874         /* Avoid "put" pointer going beyond "get" pointer */
2875         if (((put_off + 1) % queue_len) == get_off) {
2876                 DBG_PRINT(TX_DBG,
2877                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2878                           put_off, get_off);
2879                 netif_stop_queue(dev);
2880         }
2881
2882         dev->trans_start = jiffies;
2883         spin_unlock_irqrestore(&sp->tx_lock, flags);
2884
2885         return 0;
2886 }
2887
2888 /**
2889  *  s2io_isr - ISR handler of the device.
2890  *  @irq: the irq of the device.
2891  *  @dev_id: a void pointer to the dev structure of the NIC.
2892  *  @regs: pointer to the registers pushed on the stack.
2893  *  Description:  This function is the ISR handler of the device. It
2894  *  identifies the reason for the interrupt and calls the relevant
2895  *  service routines. As a contingency measure, this ISR allocates the
2896  *  recv buffers, if their numbers are below the panic value which is
2897  *  presently set to 25% of the original number of rcv buffers allocated.
2898  *  Return value:
2899  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
2900  *   IRQ_NONE: will be returned if interrupt is not from our device
2901  */
2902 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2903 {
2904         struct net_device *dev = (struct net_device *) dev_id;
2905         nic_t *sp = dev->priv;
2906         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2907         int i;
2908         u64 reason = 0;
2909         mac_info_t *mac_control;
2910         struct config_param *config;
2911
2912         mac_control = &sp->mac_control;
2913         config = &sp->config;
2914
2915         /*
2916          * Identify the cause for interrupt and call the appropriate
2917          * interrupt handler. Causes for the interrupt could be;
2918          * 1. Rx of packet.
2919          * 2. Tx complete.
2920          * 3. Link down.
2921          * 4. Error in any functional blocks of the NIC.
2922          */
2923         reason = readq(&bar0->general_int_status);
2924
2925         if (!reason) {
2926                 /* The interrupt was not raised by Xena. */
2927                 return IRQ_NONE;
2928         }
2929
2930         if (reason & (GEN_ERROR_INTR))
2931                 alarm_intr_handler(sp);
2932
2933 #ifdef CONFIG_S2IO_NAPI
2934         if (reason & GEN_INTR_RXTRAFFIC) {
2935                 if (netif_rx_schedule_prep(dev)) {
2936                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2937                                               DISABLE_INTRS);
2938                         __netif_rx_schedule(dev);
2939                 }
2940         }
2941 #else
2942         /* If Intr is because of Rx Traffic */
2943         if (reason & GEN_INTR_RXTRAFFIC) {
2944                 for (i = 0; i < config->rx_ring_num; i++) {
2945                         rx_intr_handler(&mac_control->rings[i]);
2946                 }
2947         }
2948 #endif
2949
2950         /* If Intr is because of Tx Traffic */
2951         if (reason & GEN_INTR_TXTRAFFIC) {
2952                 for (i = 0; i < config->tx_fifo_num; i++)
2953                         tx_intr_handler(&mac_control->fifos[i]);
2954         }
2955
2956         /*
2957          * If the Rx buffer count is below the panic threshold then
2958          * reallocate the buffers from the interrupt handler itself,
2959          * else schedule a tasklet to reallocate the buffers.
2960          */
2961 #ifndef CONFIG_S2IO_NAPI
2962         for (i = 0; i < config->rx_ring_num; i++) {
2963                 int ret;
2964                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2965                 int level = rx_buffer_level(sp, rxb_size, i);
2966
2967                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2968                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2969                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
2970                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2971                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2972                                           dev->name);
2973                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2974                                 clear_bit(0, (&sp->tasklet_status));
2975                                 return IRQ_HANDLED;
2976                         }
2977                         clear_bit(0, (&sp->tasklet_status));
2978                 } else if (level == LOW) {
2979                         tasklet_schedule(&sp->task);
2980                 }
2981         }
2982 #endif
2983
2984         return IRQ_HANDLED;
2985 }
2986
2987 /**
2988  *  s2io_get_stats - Updates the device statistics structure.
2989  *  @dev : pointer to the device structure.
2990  *  Description:
2991  *  This function updates the device statistics structure in the s2io_nic
2992  *  structure and returns a pointer to the same.
2993  *  Return value:
2994  *  pointer to the updated net_device_stats structure.
2995  */
2996
2997 struct net_device_stats *s2io_get_stats(struct net_device *dev)
2998 {
2999         nic_t *sp = dev->priv;
3000         mac_info_t *mac_control;
3001         struct config_param *config;
3002
3003
3004         mac_control = &sp->mac_control;
3005         config = &sp->config;
3006
3007         sp->stats.tx_errors =
3008                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3009         sp->stats.rx_errors =
3010                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3011         sp->stats.multicast =
3012                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3013         sp->stats.rx_length_errors =
3014                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3015
3016         return (&sp->stats);
3017 }
3018
3019 /**
3020  *  s2io_set_multicast - entry point for multicast address enable/disable.
3021  *  @dev : pointer to the device structure
3022  *  Description:
3023  *  This function is a driver entry point which gets called by the kernel
3024  *  whenever multicast addresses must be enabled/disabled. This also gets
3025  *  called to set/reset promiscuous mode. Depending on the device flags, we
3026  *  determine whether multicast addresses must be enabled or promiscuous
3027  *  mode is to be disabled, etc.
3028  *  Return value:
3029  *  void.
3030  */
3031
3032 static void s2io_set_multicast(struct net_device *dev)
3033 {
3034         int i, j, prev_cnt;
3035         struct dev_mc_list *mclist;
3036         nic_t *sp = dev->priv;
3037         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3038         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3039             0xfeffffffffffULL;
3040         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3041         void __iomem *add;
3042
3043         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3044                 /*  Enable all Multicast addresses */
3045                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3046                        &bar0->rmac_addr_data0_mem);
3047                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3048                        &bar0->rmac_addr_data1_mem);
3049                 val64 = RMAC_ADDR_CMD_MEM_WE |
3050                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3051                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3052                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3053                 /* Wait till command completes */
3054                 wait_for_cmd_complete(sp);
3055
3056                 sp->m_cast_flg = 1;
3057                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3058         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3059                 /*  Disable all Multicast addresses */
3060                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3061                        &bar0->rmac_addr_data0_mem);
3062                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3063                        &bar0->rmac_addr_data1_mem);
3064                 val64 = RMAC_ADDR_CMD_MEM_WE |
3065                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3066                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3067                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3068                 /* Wait till command completes */
3069                 wait_for_cmd_complete(sp);
3070
3071                 sp->m_cast_flg = 0;
3072                 sp->all_multi_pos = 0;
3073         }
3074
3075         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3076                 /*  Put the NIC into promiscuous mode */
3077                 add = &bar0->mac_cfg;
3078                 val64 = readq(&bar0->mac_cfg);
3079                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3080
3081                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3082                 writel((u32) val64, add);
3083                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3084                 writel((u32) (val64 >> 32), (add + 4));
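                /*
                 * mac_cfg appears to be protected by rmac_cfg_key, and
                 * writel() can only update 32 bits at a time, so the key is
                 * re-armed before each half of the 64-bit value is written.
                 */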
3085
3086                 val64 = readq(&bar0->mac_cfg);
3087                 sp->promisc_flg = 1;
3088                 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3089                           dev->name);
3090         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3091                 /*  Remove the NIC from promiscuous mode */
3092                 add = &bar0->mac_cfg;
3093                 val64 = readq(&bar0->mac_cfg);
3094                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3095
3096                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3097                 writel((u32) val64, add);
3098                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3099                 writel((u32) (val64 >> 32), (add + 4));
3100
3101                 val64 = readq(&bar0->mac_cfg);
3102                 sp->promisc_flg = 0;
3103                 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3104                           dev->name);
3105         }
3106
3107         /*  Update individual M_CAST address list */
3108         if ((!sp->m_cast_flg) && dev->mc_count) {
3109                 if (dev->mc_count >
3110                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3111                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3112                                   dev->name);
3113                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
3114                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3115                         return;
3116                 }
3117
3118                 prev_cnt = sp->mc_addr_count;
3119                 sp->mc_addr_count = dev->mc_count;
3120
3121                 /* Clear out the previous list of Mc in the H/W. */
3122                 for (i = 0; i < prev_cnt; i++) {
3123                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3124                                &bar0->rmac_addr_data0_mem);
3125                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3126                                 &bar0->rmac_addr_data1_mem);
3127                         val64 = RMAC_ADDR_CMD_MEM_WE |
3128                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3129                             RMAC_ADDR_CMD_MEM_OFFSET
3130                             (MAC_MC_ADDR_START_OFFSET + i);
3131                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3132
3133                         /* Wait till command completes */
3134                         if (wait_for_cmd_complete(sp)) {
3135                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3136                                           dev->name);
3137                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3138                                 return;
3139                         }
3140                 }
3141
3142                 /* Create the new Rx filter list and update the same in H/W. */
3143                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3144                      i++, mclist = mclist->next) {
3145                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3146                                ETH_ALEN);
                        mac_addr = 0;   /* start clean for each address */
3147                         for (j = 0; j < ETH_ALEN; j++) {
3148                                 mac_addr |= mclist->dmi_addr[j];
3149                                 mac_addr <<= 8;
3150                         }
3151                         mac_addr >>= 8;
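                        /*
                         * The loop above ORs each address byte in and then
                         * shifts left, leaving one surplus zero byte at the
                         * bottom; the final right shift drops it, so
                         * dmi_addr[0] ends up in bits 47:40 and dmi_addr[5]
                         * in bits 7:0 of mac_addr.
                         */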
3152                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3153                                &bar0->rmac_addr_data0_mem);
3154                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3155                                 &bar0->rmac_addr_data1_mem);
3156                         val64 = RMAC_ADDR_CMD_MEM_WE |
3157                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3158                             RMAC_ADDR_CMD_MEM_OFFSET
3159                             (i + MAC_MC_ADDR_START_OFFSET);
3160                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3161
3162                         /* Wait till command completes */
3163                         if (wait_for_cmd_complete(sp)) {
3164                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3165                                           dev->name);
3166                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3167                                 return;
3168                         }
3169                 }
3170         }
3171 }
3172
3173 /**
3174  *  s2io_set_mac_addr - Programs the Xframe mac address
3175  *  @dev : pointer to the device structure.
3176  *  @addr: a uchar pointer to the new mac address which is to be set.
3177  *  Description : This procedure will program the Xframe to receive
3178  *  frames with new Mac Address
3179  *  Return value: SUCCESS on success and an appropriate (-)ve integer
3180  *  as defined in errno.h file on failure.
3181  */
3182
3183 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3184 {
3185         nic_t *sp = dev->priv;
3186         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3187         register u64 val64, mac_addr = 0;
3188         int i;
3189
3190         /*
3191          * Set the new MAC address as the new unicast filter and reflect this
3192          * change on the device address registered with the OS. It will be
3193          * at offset 0.
3194          */
3195         for (i = 0; i < ETH_ALEN; i++) {
3196                 mac_addr <<= 8;
3197                 mac_addr |= addr[i];
3198         }
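        /*
         * Example of the packing above: addr = 02:0c:f1:12:34:56 yields
         * mac_addr = 0x020cf1123456, i.e. addr[0] in bits 47:40 and
         * addr[5] in bits 7:0, which is the value handed to
         * RMAC_ADDR_DATA0_MEM_ADDR() below.
         */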
3199
3200         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3201                &bar0->rmac_addr_data0_mem);
3202
3203         val64 =
3204             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3205             RMAC_ADDR_CMD_MEM_OFFSET(0);
3206         writeq(val64, &bar0->rmac_addr_cmd_mem);
3207         /* Wait till command completes */
3208         if (wait_for_cmd_complete(sp)) {
3209                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3210                 return FAILURE;
3211         }
3212
3213         return SUCCESS;
3214 }
3215
3216 /**
3217  * s2io_ethtool_sset - Sets different link parameters.
3218  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3219  * @info: pointer to the structure with parameters given by ethtool to set
3220  * link information.
3221  * Description:
3222  * The function sets different link parameters provided by the user onto
3223  * the NIC.
3224  * Return value:
3225  * 0 on success.
3226 */
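/*
 * Usage note: the Xframe runs only at 10Gbps full duplex with autoneg off,
 * so the only ethtool setting this handler accepts is the equivalent of
 *
 *      ethtool -s ethX speed 10000 duplex full autoneg off
 *
 * (interface name illustrative); anything else returns -EINVAL.
 */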
3227
3228 static int s2io_ethtool_sset(struct net_device *dev,
3229                              struct ethtool_cmd *info)
3230 {
3231         nic_t *sp = dev->priv;
3232         if ((info->autoneg == AUTONEG_ENABLE) ||
3233             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3234                 return -EINVAL;
3235         else {
3236                 s2io_close(sp->dev);
3237                 s2io_open(sp->dev);
3238         }
3239
3240         return 0;
3241 }
3242
3243 /**
3244  * s2io_ethtool_gset - Return link specific information.
3245  * @sp : private member of the device structure, pointer to the
3246  *      s2io_nic structure.
3247  * @info : pointer to the structure with parameters given by ethtool
3248  * to return link information.
3249  * Description:
3250  * Returns link specific information like speed, duplex, etc. to ethtool.
3251  * Return value :
3252  * return 0 on success.
3253  */
3254
3255 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3256 {
3257         nic_t *sp = dev->priv;
3258         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3259         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3260         info->port = PORT_FIBRE;
3261         /* info->transceiver?? TODO */
3262
3263         if (netif_carrier_ok(sp->dev)) {
3264                 info->speed = 10000;
3265                 info->duplex = DUPLEX_FULL;
3266         } else {
3267                 info->speed = -1;
3268                 info->duplex = -1;
3269         }
3270
3271         info->autoneg = AUTONEG_DISABLE;
3272         return 0;
3273 }
3274
3275 /**
3276  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3277  * @sp : private member of the device structure, which is a pointer to the
3278  * s2io_nic structure.
3279  * @info : pointer to the structure with parameters given by ethtool to
3280  * return driver information.
3281  * Description:
3282  * Returns driver specific information like name, version, etc. to ethtool.
3283  * Return value:
3284  *  void
3285  */
3286
3287 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3288                                   struct ethtool_drvinfo *info)
3289 {
3290         nic_t *sp = dev->priv;
3291
3292         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3293         strncpy(info->version, s2io_driver_version,
3294                 sizeof(s2io_driver_version));
3295         strncpy(info->fw_version, "", 32);
3296         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3297         info->regdump_len = XENA_REG_SPACE;
3298         info->eedump_len = XENA_EEPROM_SPACE;
3299         info->testinfo_len = S2IO_TEST_LEN;
3300         info->n_stats = S2IO_STAT_LEN;
3301 }
3302
3303 /**
3304  *  s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
3305  *  @sp: private member of the device structure, which is a pointer to the
3306  *  s2io_nic structure.
3307  *  @regs : pointer to the structure with parameters given by ethtool for
3308  *  dumping the registers.
3309  *  @reg_space: The input argument into which all the registers are dumped.
3310  *  Description:
3311  *  Dumps the entire register space of xFrame NIC into the user given
3312  *  buffer area.
3313  * Return value :
3314  * void .
3315 */
3316
3317 static void s2io_ethtool_gregs(struct net_device *dev,
3318                                struct ethtool_regs *regs, void *space)
3319 {
3320         int i;
3321         u64 reg;
3322         u8 *reg_space = (u8 *) space;
3323         nic_t *sp = dev->priv;
3324
3325         regs->len = XENA_REG_SPACE;
3326         regs->version = sp->pdev->subsystem_device;
3327
3328         for (i = 0; i < regs->len; i += 8) {
3329                 reg = readq(sp->bar0 + i);
3330                 memcpy((reg_space + i), &reg, 8);
3331         }
3332 }
3333
3334 /**
3335  *  s2io_phy_id  - timer function that alternates adapter LED.
3336  *  @data : address of the private member of the device structure, which
3337  *  is a pointer to the s2io_nic structure, provided as an u32.
3338  * Description: This is actually the timer function that toggles the
3339  * adapter LED bit of the adapter control register on every
3340  * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3341  *  once every second.
3342 */
3343 static void s2io_phy_id(unsigned long data)
3344 {
3345         nic_t *sp = (nic_t *) data;
3346         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3347         u64 val64 = 0;
3348         u16 subid;
3349
3350         subid = sp->pdev->subsystem_device;
3351         if ((subid & 0xFF) >= 0x07) {
3352                 val64 = readq(&bar0->gpio_control);
3353                 val64 ^= GPIO_CTRL_GPIO_0;
3354                 writeq(val64, &bar0->gpio_control);
3355         } else {
3356                 val64 = readq(&bar0->adapter_control);
3357                 val64 ^= ADAPTER_LED_ON;
3358                 writeq(val64, &bar0->adapter_control);
3359         }
3360
3361         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3362 }
3363
3364 /**
3365  * s2io_ethtool_idnic - To physically identify the nic on the system.
3366  * @sp : private member of the device structure, which is a pointer to the
3367  * s2io_nic structure.
3368  * @id : pointer to the structure with identification parameters given by
3369  * ethtool.
3370  * Description: Used to physically identify the NIC on the system.
3371  * The Link LED will blink for a time specified by the user for
3372  * identification.
3373  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3374  * identification is possible only if its link is up.
3375  * Return value:
3376  * int , returns 0 on success
3377  */
3378
3379 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3380 {
3381         u64 val64 = 0, last_gpio_ctrl_val;
3382         nic_t *sp = dev->priv;
3383         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3384         u16 subid;
3385
3386         subid = sp->pdev->subsystem_device;
3387         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3388         if ((subid & 0xFF) < 0x07) {
3389                 val64 = readq(&bar0->adapter_control);
3390                 if (!(val64 & ADAPTER_CNTL_EN)) {
3391                         printk(KERN_ERR
3392                                "Adapter Link down, cannot blink LED\n");
3393                         return -EFAULT;
3394                 }
3395         }
3396         if (sp->id_timer.function == NULL) {
3397                 init_timer(&sp->id_timer);
3398                 sp->id_timer.function = s2io_phy_id;
3399                 sp->id_timer.data = (unsigned long) sp;
3400         }
3401         mod_timer(&sp->id_timer, jiffies);
3402         if (data)
3403                 msleep_interruptible(data * HZ);
3404         else
3405                 msleep_interruptible(MAX_FLICKER_TIME);
3406         del_timer_sync(&sp->id_timer);
3407
3408         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3409                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3410                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3411         }
3412
3413         return 0;
3414 }
3415
3416 /**
3417  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3418  * @sp : private member of the device structure, which is a pointer to the
3419  *      s2io_nic structure.
3420  * @ep : pointer to the structure with pause parameters given by ethtool.
3421  * Description:
3422  * Returns the Pause frame generation and reception capability of the NIC.
3423  * Return value:
3424  *  void
3425  */
3426 static void s2io_ethtool_getpause_data(struct net_device *dev,
3427                                        struct ethtool_pauseparam *ep)
3428 {
3429         u64 val64;
3430         nic_t *sp = dev->priv;
3431         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3432
3433         val64 = readq(&bar0->rmac_pause_cfg);
3434         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3435                 ep->tx_pause = TRUE;
3436         if (val64 & RMAC_PAUSE_RX_ENABLE)
3437                 ep->rx_pause = TRUE;
3438         ep->autoneg = FALSE;
3439 }
3440
3441 /**
3442  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3443  * @sp : private member of the device structure, which is a pointer to the
3444  *      s2io_nic structure.
3445  * @ep : pointer to the structure with pause parameters given by ethtool.
3446  * Description:
3447  * It can be used to set or reset Pause frame generation or reception
3448  * support of the NIC.
3449  * Return value:
3450  * int, returns 0 on Success
3451  */
3452
3453 static int s2io_ethtool_setpause_data(struct net_device *dev,
3454                                struct ethtool_pauseparam *ep)
3455 {
3456         u64 val64;
3457         nic_t *sp = dev->priv;
3458         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3459
3460         val64 = readq(&bar0->rmac_pause_cfg);
3461         if (ep->tx_pause)
3462                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3463         else
3464                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3465         if (ep->rx_pause)
3466                 val64 |= RMAC_PAUSE_RX_ENABLE;
3467         else
3468                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3469         writeq(val64, &bar0->rmac_pause_cfg);
3470         return 0;
3471 }
3472
3473 /**
3474  * read_eeprom - reads 4 bytes of data from user given offset.
3475  * @sp : private member of the device structure, which is a pointer to the
3476  *      s2io_nic structure.
3477  * @off : offset from which the data must be read
3478  * @data : Its an output parameter where the data read at the given
3479  *      offset is stored.
3480  * Description:
3481  * Will read 4 bytes of data from the user given offset and return the
3482  * read data.
3483  * NOTE: Will allow reading only the part of the EEPROM visible through
3484  *   the I2C bus.
3485  * Return value:
3486  *  -1 on failure and 0 on success.
3487  */
3488
3489 #define S2IO_DEV_ID             5
3490 static int read_eeprom(nic_t * sp, int off, u32 * data)
3491 {
3492         int ret = -1;
3493         u32 exit_cnt = 0;
3494         u64 val64;
3495         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3496
3497         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3498             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3499             I2C_CONTROL_CNTL_START;
3500         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3501
3502         while (exit_cnt < 5) {
3503                 val64 = readq(&bar0->i2c_control);
3504                 if (I2C_CONTROL_CNTL_END(val64)) {
3505                         *data = I2C_CONTROL_GET_DATA(val64);
3506                         ret = 0;
3507                         break;
3508                 }
3509                 msleep(50);
3510                 exit_cnt++;
3511         }
3512
3513         return ret;
3514 }
3515
3516 /**
3517  *  write_eeprom - actually writes the relevant part of the data value.
3518  *  @sp : private member of the device structure, which is a pointer to the
3519  *       s2io_nic structure.
3520  *  @off : offset at which the data must be written
3521  *  @data : The data that is to be written
3522  *  @cnt : Number of bytes of the data that are actually to be written into
3523  *  the Eeprom. (max of 3)
3524  * Description:
3525  *  Actually writes the relevant part of the data value into the Eeprom
3526  *  through the I2C bus.
3527  * Return value:
3528  *  0 on success, -1 on failure.
3529  */
3530
3531 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3532 {
3533         int exit_cnt = 0, ret = -1;
3534         u64 val64;
3535         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3536
3537         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3538             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3539             I2C_CONTROL_CNTL_START;
3540         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3541
3542         while (exit_cnt < 5) {
3543                 val64 = readq(&bar0->i2c_control);
3544                 if (I2C_CONTROL_CNTL_END(val64)) {
3545                         if (!(val64 & I2C_CONTROL_NACK))
3546                                 ret = 0;
3547                         break;
3548                 }
3549                 msleep(50);
3550                 exit_cnt++;
3551         }
3552
3553         return ret;
3554 }
3555
3556 /**
3557  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
3558  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3559  *  @eeprom : pointer to the user level structure provided by ethtool,
3560  *  containing all relevant information.
3561  *  @data_buf : user defined value to be written into Eeprom.
3562  *  Description: Reads the values stored in the Eeprom at given offset
3563  *  Description: Reads the values stored in the Eeprom at the given offset
3564  *  for a given length. Stores these values in the input argument data
3565  *  Return value:
3566  *  int  0 on success
3567  */
3568
3569 static int s2io_ethtool_geeprom(struct net_device *dev,
3570                          struct ethtool_eeprom *eeprom, u8 * data_buf)
3571 {
3572         u32 data, i, valid;
3573         nic_t *sp = dev->priv;
3574
3575         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3576
3577         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3578                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3579
3580         for (i = 0; i < eeprom->len; i += 4) {
3581                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3582                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3583                         return -EFAULT;
3584                 }
3585                 valid = INV(data);
3586                 memcpy((data_buf + i), &valid, 4);
3587         }
3588         return 0;
3589 }
3590
3591 /**
3592  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3593  *  @sp : private member of the device structure, which is a pointer to the
3594  *  s2io_nic structure.
3595  *  @eeprom : pointer to the user level structure provided by ethtool,
3596  *  containing all relevant information.
3597  *  @data_buf : user defined value to be written into Eeprom.
3598  *  Description:
3599  *  Tries to write the user provided value in the Eeprom, at the offset
3600  *  given by the user.
3601  *  Return value:
3602  *  0 on success, -EFAULT on failure.
3603  */
3604
3605 static int s2io_ethtool_seeprom(struct net_device *dev,
3606                                 struct ethtool_eeprom *eeprom,
3607                                 u8 * data_buf)
3608 {
3609         int len = eeprom->len, cnt = 0;
3610         u32 valid = 0, data;
3611         nic_t *sp = dev->priv;
3612
3613         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3614                 DBG_PRINT(ERR_DBG,
3615                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3616                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3617                           eeprom->magic);
3618                 return -EFAULT;
3619         }
3620
3621         while (len) {
3622                 data = (u32) data_buf[cnt] & 0x000000FF;
3623                 if (data) {
3624                         valid = (u32) (data << 24);
3625                 } else
3626                         valid = data;
3627
3628                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3629                         DBG_PRINT(ERR_DBG,
3630                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3631                         DBG_PRINT(ERR_DBG,
3632                                   "write into the specified offset\n");
3633                         return -EFAULT;
3634                 }
3635                 cnt++;
3636                 len--;
3637         }
3638
3639         return 0;
3640 }
3641
3642 /**
3643  * s2io_register_test - reads and writes into all clock domains.
3644  * @sp : private member of the device structure, which is a pointer to the
3645  * s2io_nic structure.
3646  * @data : variable that returns the result of each of the tests conducted
3647  * by the driver.
3648  * Description:
3649  * Read and write into all clock domains. The NIC has 3 clock domains;
3650  * verify that registers in all three regions are accessible.
3651  * Return value:
3652  * 0 on success.
3653  */
3654
3655 static int s2io_register_test(nic_t * sp, uint64_t * data)
3656 {
3657         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3658         u64 val64 = 0;
3659         int fail = 0;
3660
3661         val64 = readq(&bar0->pif_rd_swapper_fb);
3662         if (val64 != 0x123456789abcdefULL) {
3663                 fail = 1;
3664                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3665         }
3666
3667         val64 = readq(&bar0->rmac_pause_cfg);
3668         if (val64 != 0xc000ffff00000000ULL) {
3669                 fail = 1;
3670                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3671         }
3672
3673         val64 = readq(&bar0->rx_queue_cfg);
3674         if (val64 != 0x0808080808080808ULL) {
3675                 fail = 1;
3676                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3677         }
3678
3679         val64 = readq(&bar0->xgxs_efifo_cfg);
3680         if (val64 != 0x000000001923141EULL) {
3681                 fail = 1;
3682                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3683         }
3684
3685         val64 = 0x5A5A5A5A5A5A5A5AULL;
3686         writeq(val64, &bar0->xmsi_data);
3687         val64 = readq(&bar0->xmsi_data);
3688         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3689                 fail = 1;
3690                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3691         }
3692
3693         val64 = 0xA5A5A5A5A5A5A5A5ULL;
3694         writeq(val64, &bar0->xmsi_data);
3695         val64 = readq(&bar0->xmsi_data);
3696         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3697                 fail = 1;
3698                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3699         }
3700
3701         *data = fail;
3702         return 0;
3703 }
3704
3705 /**
3706  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3707  * @sp : private member of the device structure, which is a pointer to the
3708  * s2io_nic structure.
3709  * @data: variable that returns the result of each of the tests conducted
3710  * by the driver.
3711  * Description:
3712  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3713  * register.
3714  * Return value:
3715  * 0 on success.
3716  */
3717
3718 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3719 {
3720         int fail = 0;
3721         u32 ret_data;
3722
3723         /* Test Write Error at offset 0 */
3724         if (!write_eeprom(sp, 0, 0, 3))
3725                 fail = 1;
3726
3727         /* Test Write at offset 4f0 */
3728         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3729                 fail = 1;
3730         if (read_eeprom(sp, 0x4F0, &ret_data))
3731                 fail = 1;
3732
3733         if (ret_data != 0x01234567)
3734                 fail = 1;
3735
3736         /* Reset the EEPROM data back to 0xFFFF */
3737         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3738
3739         /* Test Write Request Error at offset 0x7c */
3740         if (!write_eeprom(sp, 0x07C, 0, 3))
3741                 fail = 1;
3742
3743         /* Test Write Request at offset 0x7fc */
3744         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3745                 fail = 1;
3746         if (read_eeprom(sp, 0x7FC, &ret_data))
3747                 fail = 1;
3748
3749         if (ret_data != 0x01234567)
3750                 fail = 1;
3751
3752         /* Reset the EEPROM data back to 0xFFFF */
3753         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3754
3755         /* Test Write Error at offset 0x80 */
3756         if (!write_eeprom(sp, 0x080, 0, 3))
3757                 fail = 1;
3758
3759         /* Test Write Error at offset 0xfc */
3760         if (!write_eeprom(sp, 0x0FC, 0, 3))
3761                 fail = 1;
3762
3763         /* Test Write Error at offset 0x100 */
3764         if (!write_eeprom(sp, 0x100, 0, 3))
3765                 fail = 1;
3766
3767         /* Test Write Error at offset 4ec */
3768         if (!write_eeprom(sp, 0x4EC, 0, 3))
3769                 fail = 1;
3770
3771         *data = fail;
3772         return 0;
3773 }
3774
3775 /**
3776  * s2io_bist_test - invokes the MemBist test of the card .
3777  * @sp : private member of the device structure, which is a pointer to the
3778  * s2io_nic structure.
3779  * @data: variable that returns the result of each of the tests conducted
3780  * by the driver.
3781  * Description:
3782  * This invokes the MemBist test of the card. We give the test around
3783  * 2 seconds to complete. If it's still not complete
3784  * within this period, we consider that the test failed.
3785  * Return value:
3786  * 0 on success and -1 on failure.
3787  */
3788
3789 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3790 {
3791         u8 bist = 0;
3792         int cnt = 0, ret = -1;
3793
3794         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3795         bist |= PCI_BIST_START;
3796         pci_write_config_word(sp->pdev, PCI_BIST, bist);
3797
3798         while (cnt < 20) {
3799                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3800                 if (!(bist & PCI_BIST_START)) {
3801                         *data = (bist & PCI_BIST_CODE_MASK);
3802                         ret = 0;
3803                         break;
3804                 }
3805                 msleep(100);
3806                 cnt++;
3807         }
3808
3809         return ret;
3810 }
3811
3812 /**
3813  * s2io_link_test - verifies the link state of the nic
3814  * @sp : private member of the device structure, which is a pointer to the
3815  * s2io_nic structure.
3816  * @data: variable that returns the result of each of the tests conducted by
3817  * the driver.
3818  * Description:
3819  * The function verifies the link state of the NIC and updates the input
3820  * argument 'data' appropriately.
3821  * Return value:
3822  * 0 on success.
3823  */
3824
3825 static int s2io_link_test(nic_t * sp, uint64_t * data)
3826 {
3827         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3828         u64 val64;
3829
3830         val64 = readq(&bar0->adapter_status);
3831         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3832                 *data = 1;
3833
3834         return 0;
3835 }
3836
3837 /**
3838  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3839  * @sp : private member of the device structure, which is a pointer to the
3840  * s2io_nic structure.
3841  * @data : variable that returns the result of each of the tests
3842  * conducted by the driver.
3843  * Description:
3844  *  This is one of the offline tests that verifies the read and write
3845  *  access to the RldRam chip on the NIC.
3846  * Return value:
3847  *  0 on success.
3848  */
3849
3850 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3851 {
3852         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3853         u64 val64;
3854         int cnt, iteration = 0, test_pass = 0;
3855
3856         val64 = readq(&bar0->adapter_control);
3857         val64 &= ~ADAPTER_ECC_EN;
3858         writeq(val64, &bar0->adapter_control);
3859
3860         val64 = readq(&bar0->mc_rldram_test_ctrl);
3861         val64 |= MC_RLDRAM_TEST_MODE;
3862         writeq(val64, &bar0->mc_rldram_test_ctrl);
3863
3864         val64 = readq(&bar0->mc_rldram_mrs);
3865         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3866         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3867
3868         val64 |= MC_RLDRAM_MRS_ENABLE;
3869         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3870
3871         while (iteration < 2) {
3872                 val64 = 0x55555555aaaa0000ULL;
3873                 if (iteration == 1) {
3874                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3875                 }
3876                 writeq(val64, &bar0->mc_rldram_test_d0);
3877
3878                 val64 = 0xaaaa5a5555550000ULL;
3879                 if (iteration == 1) {
3880                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3881                 }
3882                 writeq(val64, &bar0->mc_rldram_test_d1);
3883
3884                 val64 = 0x55aaaaaaaa5a0000ULL;
3885                 if (iteration == 1) {
3886                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3887                 }
3888                 writeq(val64, &bar0->mc_rldram_test_d2);
3889
3890                 val64 = (u64) (0x0000003fffff0000ULL);
3891                 writeq(val64, &bar0->mc_rldram_test_add);
3892
3893
3894                 val64 = MC_RLDRAM_TEST_MODE;
3895                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3896
3897                 val64 |=
3898                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3899                     MC_RLDRAM_TEST_GO;
3900                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3901
3902                 for (cnt = 0; cnt < 5; cnt++) {
3903                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3904                         if (val64 & MC_RLDRAM_TEST_DONE)
3905                                 break;
3906                         msleep(200);
3907                 }
3908
3909                 if (cnt == 5)
3910                         break;
3911
3912                 val64 = MC_RLDRAM_TEST_MODE;
3913                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3914
3915                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3916                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3917
3918                 for (cnt = 0; cnt < 5; cnt++) {
3919                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3920                         if (val64 & MC_RLDRAM_TEST_DONE)
3921                                 break;
3922                         msleep(500);
3923                 }
3924
3925                 if (cnt == 5)
3926                         break;
3927
3928                 val64 = readq(&bar0->mc_rldram_test_ctrl);
3929                 if (val64 & MC_RLDRAM_TEST_PASS)
3930                         test_pass = 1;
3931
3932                 iteration++;
3933         }
3934
3935         if (!test_pass)
3936                 *data = 1;
3937         else
3938                 *data = 0;
3939
3940         return 0;
3941 }
3942
3943 /**
3944  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
3945  *  @sp : private member of the device structure, which is a pointer to the
3946  *  s2io_nic structure.
3947  *  @ethtest : pointer to an ethtool command specific structure that will be
3948  *  returned to the user.
3949  *  @data : variable that returns the result of each of the tests
3950  * conducted by the driver.
3951  * Description:
3952  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
3953  *  the health of the card.
3954  * Return value:
3955  *  void
3956  */
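/*
 * Result slots as filled in below: data[0] = register test,
 * data[1] = EEPROM test, data[2] = link test, data[3] = RLDRAM test,
 * data[4] = BIST.
 */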
3957
3958 static void s2io_ethtool_test(struct net_device *dev,
3959                               struct ethtool_test *ethtest,
3960                               uint64_t * data)
3961 {
3962         nic_t *sp = dev->priv;
3963         int orig_state = netif_running(sp->dev);
3964
3965         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3966                 /* Offline Tests. */
3967                 if (orig_state)
3968                         s2io_close(sp->dev);
3969
3970                 if (s2io_register_test(sp, &data[0]))
3971                         ethtest->flags |= ETH_TEST_FL_FAILED;
3972
3973                 s2io_reset(sp);
3974
3975                 if (s2io_rldram_test(sp, &data[3]))
3976                         ethtest->flags |= ETH_TEST_FL_FAILED;
3977
3978                 s2io_reset(sp);
3979
3980                 if (s2io_eeprom_test(sp, &data[1]))
3981                         ethtest->flags |= ETH_TEST_FL_FAILED;
3982
3983                 if (s2io_bist_test(sp, &data[4]))
3984                         ethtest->flags |= ETH_TEST_FL_FAILED;
3985
3986                 if (orig_state)
3987                         s2io_open(sp->dev);
3988
3989                 data[2] = 0;
3990         } else {
3991                 /* Online Tests. */
3992                 if (!orig_state) {
3993                         DBG_PRINT(ERR_DBG,
3994                                   "%s: is not up, cannot run test\n",
3995                                   dev->name);
3996                         data[0] = -1;
3997                         data[1] = -1;
3998                         data[2] = -1;
3999                         data[3] = -1;
4000                         data[4] = -1;
4001                 }
4002
4003                 if (s2io_link_test(sp, &data[2]))
4004                         ethtest->flags |= ETH_TEST_FL_FAILED;
4005
4006                 data[0] = 0;
4007                 data[1] = 0;
4008                 data[3] = 0;
4009                 data[4] = 0;
4010         }
4011 }
4012
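/*
 * Note: the counters below are filled in the same order as the names in the
 * ethtool_stats_keys table (returned by s2io_ethtool_get_strings() for
 * ETH_SS_STATS), and their count must match S2IO_STAT_LEN, otherwise the
 * "ethtool -S <dev>" output would be mislabelled.
 */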
4013 static void s2io_get_ethtool_stats(struct net_device *dev,
4014                                    struct ethtool_stats *estats,
4015                                    u64 * tmp_stats)
4016 {
4017         int i = 0;
4018         nic_t *sp = dev->priv;
4019         StatInfo_t *stat_info = sp->mac_control.stats_info;
4020
4021         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4022         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4023         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4024         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4025         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4026         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4027         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4028         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4029         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4030         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4031         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4032         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4033         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4034         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
4035         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4036         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4037         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4038         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4039         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4040         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4041         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4042         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4043         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4044         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4045         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4046         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4047         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4048         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4049         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4050         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4051         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4052         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4053         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4054         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4055         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4056         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4057         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4058         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4059         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4060 }
4061
4062 int s2io_ethtool_get_regs_len(struct net_device *dev)
4063 {
4064         return (XENA_REG_SPACE);
4065 }
4066
4067
4068 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4069 {
4070         nic_t *sp = dev->priv;
4071
4072         return (sp->rx_csum);
4073 }
4074 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4075 {
4076         nic_t *sp = dev->priv;
4077
4078         if (data)
4079                 sp->rx_csum = 1;
4080         else
4081                 sp->rx_csum = 0;
4082
4083         return 0;
4084 }
4085 int s2io_get_eeprom_len(struct net_device *dev)
4086 {
4087         return (XENA_EEPROM_SPACE);
4088 }
4089
4090 int s2io_ethtool_self_test_count(struct net_device *dev)
4091 {
4092         return (S2IO_TEST_LEN);
4093 }
4094 void s2io_ethtool_get_strings(struct net_device *dev,
4095                               u32 stringset, u8 * data)
4096 {
4097         switch (stringset) {
4098         case ETH_SS_TEST:
4099                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4100                 break;
4101         case ETH_SS_STATS:
4102                 memcpy(data, &ethtool_stats_keys,
4103                        sizeof(ethtool_stats_keys));
4104         }
4105 }
4106 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4107 {
4108         return (S2IO_STAT_LEN);
4109 }
4110
4111 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4112 {
4113         if (data)
4114                 dev->features |= NETIF_F_IP_CSUM;
4115         else
4116                 dev->features &= ~NETIF_F_IP_CSUM;
4117
4118         return 0;
4119 }
4120
4121
4122 static struct ethtool_ops netdev_ethtool_ops = {
4123         .get_settings = s2io_ethtool_gset,
4124         .set_settings = s2io_ethtool_sset,
4125         .get_drvinfo = s2io_ethtool_gdrvinfo,
4126         .get_regs_len = s2io_ethtool_get_regs_len,
4127         .get_regs = s2io_ethtool_gregs,
4128         .get_link = ethtool_op_get_link,
4129         .get_eeprom_len = s2io_get_eeprom_len,
4130         .get_eeprom = s2io_ethtool_geeprom,
4131         .set_eeprom = s2io_ethtool_seeprom,
4132         .get_pauseparam = s2io_ethtool_getpause_data,
4133         .set_pauseparam = s2io_ethtool_setpause_data,
4134         .get_rx_csum = s2io_ethtool_get_rx_csum,
4135         .set_rx_csum = s2io_ethtool_set_rx_csum,
4136         .get_tx_csum = ethtool_op_get_tx_csum,
4137         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4138         .get_sg = ethtool_op_get_sg,
4139         .set_sg = ethtool_op_set_sg,
4140 #ifdef NETIF_F_TSO
4141         .get_tso = ethtool_op_get_tso,
4142         .set_tso = ethtool_op_set_tso,
4143 #endif
4144         .self_test_count = s2io_ethtool_self_test_count,
4145         .self_test = s2io_ethtool_test,
4146         .get_strings = s2io_ethtool_get_strings,
4147         .phys_id = s2io_ethtool_idnic,
4148         .get_stats_count = s2io_ethtool_get_stats_count,
4149         .get_ethtool_stats = s2io_get_ethtool_stats
4150 };
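/*
 * This table is hooked up to the net_device via SET_ETHTOOL_OPS() in
 * s2io_init_nic() below.  As a usage sketch (interface name assumed to be
 * eth0), typical invocations from user space would be, for example:
 *
 *     ethtool -S eth0        # dump the MAC statistics via get_ethtool_stats
 *     ethtool -e eth0        # read the serial EEPROM via get_eeprom
 *     ethtool -p eth0 5      # blink the adapter LED for 5 seconds (phys_id)
 *     ethtool -K eth0 rx on  # toggle Rx checksum offload (set_rx_csum)
 */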
4151
4152 /**
4153  *  s2io_ioctl - Entry point for the Ioctl
4154  *  @dev :  Device pointer.
4155  *  @rq :  An IOCTL specific structure that can contain a pointer to
4156  *  a proprietary structure used to pass information to the driver.
4157  *  @cmd :  This is used to distinguish between the different commands that
4158  *  can be passed to the IOCTL functions.
4159  *  Description:
4160  *  Currently there is no special functionality supported in IOCTL, hence
4161  *  the function always returns -EOPNOTSUPP.
4162  */
4163
4164 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4165 {
4166         return -EOPNOTSUPP;
4167 }
4168
4169 /**
4170  *  s2io_change_mtu - entry point to change MTU size for the device.
4171  *   @dev : device pointer.
4172  *   @new_mtu : the new MTU size for the device.
4173  *   Description: A driver entry point to change MTU size for the device.
4174  *   Before changing the MTU the device must be stopped.
4175  *  Return value:
4176  *   0 on success and an appropriate (-)ve integer as defined in errno.h
4177  *   file on failure.
4178  */
4179
4180 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4181 {
4182         nic_t *sp = dev->priv;
4183         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4184         register u64 val64;
4185
4186         if (netif_running(dev)) {
4187                 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4188                 DBG_PRINT(ERR_DBG, "change its MTU\n");
4189                 return -EBUSY;
4190         }
4191
4192         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4193                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4194                           dev->name);
4195                 return -EPERM;
4196         }
4197
4198         /* Set the new MTU into the PYLD register of the NIC */
4199         val64 = new_mtu;
4200         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
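        /*
         * Note: vBIT(val, loc, sz) is assumed (per the register layout
         * helpers in s2io.h) to place the value into an sz-bit wide field
         * starting at bit offset loc of the big-endian 64-bit register,
         * roughly ((u64) val) << (64 - loc - sz); the new MTU therefore
         * ends up in the 14-bit payload length field of rmac_max_pyld_len.
         */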
4201
4202         dev->mtu = new_mtu;
4203
4204         return 0;
4205 }
4206
4207 /**
4208  *  s2io_tasklet - Bottom half of the ISR.
4209  *  @dev_addr : address of the net_device structure, passed as an unsigned long.
4210  *  Description:
4211  *  This is the tasklet or the bottom half of the ISR. This is
4212  *  an extension of the ISR which is scheduled by the scheduler to be run
4213  *  when the load on the CPU is low. All low priority tasks of the ISR can
4214  *  be pushed into the tasklet. For now the tasklet is used only to
4215  *  replenish the Rx buffers in the Rx buffer descriptors.
4216  *  Return value:
4217  *  void.
4218  */
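/*
 * The tasklet itself is set up in s2io_card_up() via tasklet_init() and
 * stopped again in s2io_card_down() via tasklet_kill().  TASKLET_IN_USE is
 * assumed to be a test_and_set_bit() on sp->tasklet_status (which is why the
 * bit is cleared again once the rings have been replenished), so overlapping
 * invocations simply return without touching the rings.
 */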
4219
4220 static void s2io_tasklet(unsigned long dev_addr)
4221 {
4222         struct net_device *dev = (struct net_device *) dev_addr;
4223         nic_t *sp = dev->priv;
4224         int i, ret;
4225         mac_info_t *mac_control;
4226         struct config_param *config;
4227
4228         mac_control = &sp->mac_control;
4229         config = &sp->config;
4230
4231         if (!TASKLET_IN_USE) {
4232                 for (i = 0; i < config->rx_ring_num; i++) {
4233                         ret = fill_rx_buffers(sp, i);
4234                         if (ret == -ENOMEM) {
4235                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
4236                                           dev->name);
4237                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4238                                 break;
4239                         } else if (ret == -EFILL) {
4240                                 DBG_PRINT(ERR_DBG,
4241                                           "%s: Rx Ring %d is full\n",
4242                                           dev->name, i);
4243                                 break;
4244                         }
4245                 }
4246                 clear_bit(0, (&sp->tasklet_status));
4247         }
4248 }
4249
4250 /**
4251  * s2io_set_link - Set the Link status
4252  * @data: long pointer to the device private structure
4253  * Description: Sets the link status for the adapter
4254  */
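/*
 * This routine runs from the set_link_task work queue entry initialized in
 * s2io_init_nic() (presumably scheduled from the link change/alarm interrupt
 * path, which is not shown here) and serializes with s2io_card_down()
 * through bit 0 of nic->link_state, which both sides test_and_set before
 * proceeding.
 */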
4255
4256 static void s2io_set_link(unsigned long data)
4257 {
4258         nic_t *nic = (nic_t *) data;
4259         struct net_device *dev = nic->dev;
4260         XENA_dev_config_t __iomem *bar0 = nic->bar0;
4261         register u64 val64;
4262         u16 subid;
4263
4264         if (test_and_set_bit(0, &(nic->link_state))) {
4265                 /* The card is being reset, no point doing anything */
4266                 return;
4267         }
4268
4269         subid = nic->pdev->subsystem_device;
4270         /*
4271          * Allow a small delay for the NIC's self-initiated
4272          * cleanup to complete.
4273          */
4274         msleep(100);
4275
4276         val64 = readq(&bar0->adapter_status);
4277         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4278                 if (LINK_IS_UP(val64)) {
4279                         val64 = readq(&bar0->adapter_control);
4280                         val64 |= ADAPTER_CNTL_EN;
4281                         writeq(val64, &bar0->adapter_control);
4282                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4283                                 val64 = readq(&bar0->gpio_control);
4284                                 val64 |= GPIO_CTRL_GPIO_0;
4285                                 writeq(val64, &bar0->gpio_control);
4286                                 val64 = readq(&bar0->gpio_control);
4287                         } else {
4288                                 val64 |= ADAPTER_LED_ON;
4289                                 writeq(val64, &bar0->adapter_control);
4290                         }
4291                         val64 = readq(&bar0->adapter_status);
4292                         if (!LINK_IS_UP(val64)) {
4293                                 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4294                                 DBG_PRINT(ERR_DBG, " Link down ");
4295                                 DBG_PRINT(ERR_DBG, "after ");
4296                                 DBG_PRINT(ERR_DBG, "enabling ");
4297                                 DBG_PRINT(ERR_DBG, "device\n");
4298                         }
4299                         if (nic->device_enabled_once == FALSE) {
4300                                 nic->device_enabled_once = TRUE;
4301                         }
4302                         s2io_link(nic, LINK_UP);
4303                 } else {
4304                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4305                                 val64 = readq(&bar0->gpio_control);
4306                                 val64 &= ~GPIO_CTRL_GPIO_0;
4307                                 writeq(val64, &bar0->gpio_control);
4308                                 val64 = readq(&bar0->gpio_control);
4309                         }
4310                         s2io_link(nic, LINK_DOWN);
4311                 }
4312         } else {                /* NIC is not Quiescent. */
4313                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4314                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4315                 netif_stop_queue(dev);
4316         }
4317         clear_bit(0, &(nic->link_state));
4318 }
4319
4320 static void s2io_card_down(nic_t * sp)
4321 {
4322         int cnt = 0;
4323         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4324         unsigned long flags;
4325         register u64 val64 = 0;
4326
4327         /* If s2io_set_link task is executing, wait till it completes. */
4328         while (test_and_set_bit(0, &(sp->link_state))) {
4329                 msleep(50);
4330         }
4331         atomic_set(&sp->card_state, CARD_DOWN);
4332
4333         /* disable Tx and Rx traffic on the NIC */
4334         stop_nic(sp);
4335
4336         /* Kill tasklet. */
4337         tasklet_kill(&sp->task);
4338
4339         /* Check if the device is Quiescent and then Reset the NIC */
4340         do {
4341                 val64 = readq(&bar0->adapter_status);
4342                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4343                         break;
4344                 }
4345
4346                 msleep(50);
4347                 cnt++;
4348                 if (cnt == 10) {
4349                         DBG_PRINT(ERR_DBG,
4350                                   "s2io_close: Device not Quiescent ");
4351                         DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4352                                   (unsigned long long) val64);
4353                         break;
4354                 }
4355         } while (1);
4356         spin_lock_irqsave(&sp->tx_lock, flags);
4357         s2io_reset(sp);
4358
4359         /* Free all unused Tx and Rx buffers */
4360         free_tx_buffers(sp);
4361         free_rx_buffers(sp);
4362
4363         spin_unlock_irqrestore(&sp->tx_lock, flags);
4364         clear_bit(0, &(sp->link_state));
4365 }
4366
4367 static int s2io_card_up(nic_t * sp)
4368 {
4369         int i, ret;
4370         mac_info_t *mac_control;
4371         struct config_param *config;
4372         struct net_device *dev = (struct net_device *) sp->dev;
4373
4374         /* Initialize the H/W I/O registers */
4375         if (init_nic(sp) != 0) {
4376                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4377                           dev->name);
4378                 return -ENODEV;
4379         }
4380
4381         /*
4382          * Initializing the Rx buffers. For now we are considering only 1
4383          * Rx ring and initializing buffers into 30 Rx blocks
4384          */
4385         mac_control = &sp->mac_control;
4386         config = &sp->config;
4387
4388         for (i = 0; i < config->rx_ring_num; i++) {
4389                 if ((ret = fill_rx_buffers(sp, i))) {
4390                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4391                                   dev->name);
4392                         s2io_reset(sp);
4393                         free_rx_buffers(sp);
4394                         return -ENOMEM;
4395                 }
4396                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4397                           atomic_read(&sp->rx_bufs_left[i]));
4398         }
4399
4400         /* Setting its receive mode */
4401         s2io_set_multicast(dev);
4402
4403         /* Enable tasklet for the device */
4404         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4405
4406         /* Enable Rx Traffic and interrupts on the NIC */
4407         if (start_nic(sp)) {
4408                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4409                 tasklet_kill(&sp->task);
4410                 s2io_reset(sp);
4411                 free_irq(dev->irq, dev);
4412                 free_rx_buffers(sp);
4413                 return -ENODEV;
4414         }
4415
4416         atomic_set(&sp->card_state, CARD_UP);
4417         return 0;
4418 }
4419
4420 /**
4421  * s2io_restart_nic - Resets the NIC.
4422  * @data : long pointer to the device private structure
4423  * Description:
4424  * This function is scheduled to be run by the s2io_tx_watchdog
4425  * function after 0.5 secs to reset the NIC. The idea is to reduce
4426  * the run time of the watch dog routine which is run holding a
4427  * spin lock.
4428  */
4429
4430 static void s2io_restart_nic(unsigned long data)
4431 {
4432         struct net_device *dev = (struct net_device *) data;
4433         nic_t *sp = dev->priv;
4434
4435         s2io_card_down(sp);
4436         if (s2io_card_up(sp)) {
4437                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4438                           dev->name);
4439         }
4440         netif_wake_queue(dev);
4441         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4442                   dev->name);
4443
4444 }
4445
4446 /**
4447  *  s2io_tx_watchdog - Watchdog for transmit side.
4448  *  @dev : Pointer to net device structure
4449  *  Description:
4450  *  This function is triggered if the Tx Queue is stopped
4451  *  for a pre-defined amount of time when the Interface is still up.
4452  *  If the Interface is jammed in such a situation, the hardware is
4453  *  reset (by s2io_close) and restarted again (by s2io_open) to
4454  *  overcome any problem that might have been caused in the hardware.
4455  *  Return value:
4456  *  void
4457  */
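/*
 * s2io_tx_watchdog() is installed as dev->tx_timeout in s2io_init_nic() with
 * dev->watchdog_timeo = WATCH_DOG_TIMEOUT; when the stack detects a stalled
 * Tx queue it calls this handler, which in turn schedules rst_timer_task so
 * that the actual reset (s2io_restart_nic() above) runs later in process
 * context instead of under the watchdog's spin lock.
 */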
4458
4459 static void s2io_tx_watchdog(struct net_device *dev)
4460 {
4461         nic_t *sp = dev->priv;
4462
4463         if (netif_carrier_ok(dev)) {
4464                 schedule_work(&sp->rst_timer_task);
4465         }
4466 }
4467
4468 /**
4469  *   rx_osm_handler - To perform some OS related operations on the SKB.
4470  *   @ring_data : per-ring information of the receive ring that this RxD
4471  *   belongs to (pointer to a ring_info_t structure).
4472  *   @rxdp : pointer to the Rx descriptor that describes the received frame
4473  *   and carries the SKB pointer in its Host_Control field.
4474  *   Description:
4475  *   This function is called by the Rx interrupt service routine to perform
4476  *   some OS related operations on the SKB before passing it to the upper
4477  *   layers. It mainly checks whether the hardware reported the L3 and L4
4478  *   checksums of the frame as OK; if so the SKB is marked
4479  *   CHECKSUM_UNNECESSARY, otherwise checksum verification is left to the
4480  *   upper layers (CHECKSUM_NONE). It then updates the Rx statistics and
4481  *   hands the SKB to the stack via netif_rx()/netif_receive_skb().
4482  *   Return value:
4483  *   SUCCESS on success and -1 on failure.
4484  */
4485 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4486 {
4487         nic_t *sp = ring_data->nic;
4488         struct net_device *dev = (struct net_device *) sp->dev;
4489         struct sk_buff *skb = (struct sk_buff *)
4490                 ((unsigned long) rxdp->Host_Control);
4491         int ring_no = ring_data->ring_no;
4492         u16 l3_csum, l4_csum;
4493 #ifdef CONFIG_2BUFF_MODE
4494         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4495         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4496         int get_block = ring_data->rx_curr_get_info.block_index;
4497         int get_off = ring_data->rx_curr_get_info.offset;
4498         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4499         unsigned char *buff;
4500 #else
4501         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
4502 #endif
4503         skb->dev = dev;
4504         if (rxdp->Control_1 & RXD_T_CODE) {
4505                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4506                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4507                           dev->name, err);
4508         }
4509
4510         /* Updating statistics */
4511         rxdp->Host_Control = 0;
4512         sp->rx_pkt_count++;
4513         sp->stats.rx_packets++;
4514 #ifndef CONFIG_2BUFF_MODE
4515         sp->stats.rx_bytes += len;
4516 #else
4517         sp->stats.rx_bytes += buf0_len + buf2_len;
4518 #endif
4519
4520 #ifndef CONFIG_2BUFF_MODE
4521         skb_put(skb, len);
4522 #else
4523         buff = skb_push(skb, buf0_len);
4524         memcpy(buff, ba->ba_0, buf0_len);
4525         skb_put(skb, buf2_len);
4526 #endif
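        /*
         * In 2-buffer mode the small buffer 0 (assumed to hold the frame
         * headers staged in ba->ba_0) is copied into the skb headroom with
         * skb_push()/memcpy(), while buffer 2 carries the payload that was
         * received directly into the skb data area, hence only its length
         * is accounted with skb_put().  In 1-buffer mode the whole frame is
         * already in the skb and skb_put(skb, len) suffices.
         */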
4527
4528         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4529             (sp->rx_csum)) {
4530                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4531                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4532                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4533                         /*
4534                          * NIC verifies if the Checksum of the received
4535                          * frame is Ok or not and accordingly returns
4536                          * a flag in the RxD.
4537                          */
4538                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4539                 } else {
4540                         /*
4541                          * Packet with erroneous checksum, let the
4542                          * upper layers deal with it.
4543                          */
4544                         skb->ip_summed = CHECKSUM_NONE;
4545                 }
4546         } else {
4547                 skb->ip_summed = CHECKSUM_NONE;
4548         }
4549
4550         skb->protocol = eth_type_trans(skb, dev);
4551 #ifdef CONFIG_S2IO_NAPI
4552         netif_receive_skb(skb);
4553 #else
4554         netif_rx(skb);
4555 #endif
4556         dev->last_rx = jiffies;
4557         atomic_dec(&sp->rx_bufs_left[ring_no]);
4558         return SUCCESS;
4559 }
4560
4561 /**
4562  *  s2io_link - stops/starts the Tx queue.
4563  *  @sp : private member of the device structure, which is a pointer to the
4564  *  s2io_nic structure.
4565  *  @link : indicates whether link is UP/DOWN.
4566  *  Description:
4567  *  This function stops/starts the Tx queue depending on whether the link
4568  *  status of the NIC is down or up. This is called by the Alarm
4569  *  interrupt handler whenever a link change interrupt comes up.
4570  *  Return value:
4571  *  void.
4572  */
4573
4574 void s2io_link(nic_t * sp, int link)
4575 {
4576         struct net_device *dev = (struct net_device *) sp->dev;
4577
4578         if (link != sp->last_link_state) {
4579                 if (link == LINK_DOWN) {
4580                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4581                         netif_carrier_off(dev);
4582                 } else {
4583                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4584                         netif_carrier_on(dev);
4585                 }
4586         }
4587         sp->last_link_state = link;
4588 }
4589
4590 /**
4591  *  get_xena_rev_id - to identify revision ID of xena.
4592  *  @pdev : PCI Dev structure
4593  *  Description:
4594  *  Function to identify the Revision ID of xena.
4595  *  Return value:
4596  *  returns the revision ID of the device.
4597  */
4598
4599 int get_xena_rev_id(struct pci_dev *pdev)
4600 {
4601         u8 id = 0;
4602
4603         pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
4604         return id;
4605 }
4606
4607 /**
4608  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4609  *  @sp : private member of the device structure, which is a pointer to the
4610  *  s2io_nic structure.
4611  *  Description:
4612  *  This function initializes a few of the PCI and PCI-X configuration registers
4613  *  with recommended values.
4614  *  Return value:
4615  *  void
4616  */
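/*
 * For reference: in the PCI-X command register, bit 0 is the Data Parity
 * Error Recovery Enable bit (set below by ORing in 1) and bit 1 is the
 * Enable Relaxed Ordering bit (cleared below by masking with 0xfffd), while
 * PCI_COMMAND_PARITY turns on the PERR# response in the standard PCI command
 * register.  The read-back after each write is assumed to serve only as a
 * flush/confirmation of the configuration update.
 */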
4617
4618 static void s2io_init_pci(nic_t * sp)
4619 {
4620         u16 pci_cmd = 0, pcix_cmd = 0;
4621
4622         /* Enable Data Parity Error Recovery in PCI-X command register. */
4623         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4624                              &(pcix_cmd));
4625         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4626                               (pcix_cmd | 1));
4627         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4628                              &(pcix_cmd));
4629
4630         /* Set the PErr Response bit in PCI command register. */
4631         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4632         pci_write_config_word(sp->pdev, PCI_COMMAND,
4633                               (pci_cmd | PCI_COMMAND_PARITY));
4634         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4635
4636         /* Forcibly disabling relaxed ordering capability of the card. */
4637         pcix_cmd &= 0xfffd;
4638         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4639                               pcix_cmd);
4640         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4641                              &(pcix_cmd));
4642 }
4643
4644 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4645 MODULE_LICENSE("GPL");
4646 module_param(tx_fifo_num, int, 0);
4647 module_param(rx_ring_num, int, 0);
4648 module_param_array(tx_fifo_len, uint, NULL, 0);
4649 module_param_array(rx_ring_sz, uint, NULL, 0);
4650 module_param(Stats_refresh_time, int, 0);
4651 module_param_array(rts_frm_len, uint, NULL, 0);
4652 module_param(use_continuous_tx_intrs, int, 0);
4653 module_param(rmac_pause_time, int, 0);
4654 module_param(mc_pause_threshold_q0q3, int, 0);
4655 module_param(mc_pause_threshold_q4q7, int, 0);
4656 module_param(shared_splits, int, 0);
4657 module_param(tmac_util_period, int, 0);
4658 module_param(rmac_util_period, int, 0);
4659 #ifndef CONFIG_S2IO_NAPI
4660 module_param(indicate_max_pkts, int, 0);
4661 #endif
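/*
 * Usage sketch (module name assumed to be s2io.ko): the parameters above can
 * be supplied at load time, for example
 *
 *     modprobe s2io tx_fifo_num=2 rx_ring_num=2 rx_ring_sz=100,100
 *
 * Parameters left unspecified keep the default values set up in
 * s2io_init_nic() below.
 */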
4662
4663 /**
4664  *  s2io_init_nic - Initialization of the adapter.
4665  *  @pdev : structure containing the PCI related information of the device.
4666  *  @pre : the entry in s2io_tbl that matched this device.
4667  *  Description:
4668  *  The function initializes an adapter identified by the pci_dev structure.
4669  *  All OS related initialization, including memory and device structure
4670  *  setup and initialization of the device private variables, is done here.
4671  *  Also the swapper control register is initialized to enable reads from
4672  *  and writes to the I/O registers of the device.
4673  *  Return value:
4674  *  returns 0 on success and negative on failure.
4675  */
4676
4677 static int __devinit
4678 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4679 {
4680         nic_t *sp;
4681         struct net_device *dev;
4682         int i, j, ret;
4683         int dma_flag = FALSE;
4684         u32 mac_up, mac_down;
4685         u64 val64 = 0, tmp64 = 0;
4686         XENA_dev_config_t __iomem *bar0 = NULL;
4687         u16 subid;
4688         mac_info_t *mac_control;
4689         struct config_param *config;
4690
4691 #ifdef CONFIG_S2IO_NAPI
4692         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4693 #endif
4694
4695         if ((ret = pci_enable_device(pdev))) {
4696                 DBG_PRINT(ERR_DBG,
4697                           "s2io_init_nic: pci_enable_device failed\n");
4698                 return ret;
4699         }
4700
4701         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4702                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4703                 dma_flag = TRUE;
4704                 if (pci_set_consistent_dma_mask
4705                     (pdev, DMA_64BIT_MASK)) {
4706                         DBG_PRINT(ERR_DBG,
4707                                   "Unable to obtain 64bit DMA for "
4708                                   "consistent allocations\n");
4709                         pci_disable_device(pdev);
4710                         return -ENOMEM;
4711                 }
4712         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4713                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4714         } else {
4715                 pci_disable_device(pdev);
4716                 return -ENOMEM;
4717         }
4718
4719         if (pci_request_regions(pdev, s2io_driver_name)) {
4720                 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
4721                 pci_disable_device(pdev);
4722                 return -ENODEV;
4723         }
4724
4725         dev = alloc_etherdev(sizeof(nic_t));
4726         if (dev == NULL) {
4727                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4728                 pci_disable_device(pdev);
4729                 pci_release_regions(pdev);
4730                 return -ENODEV;
4731         }
4732
4733         pci_set_master(pdev);
4734         pci_set_drvdata(pdev, dev);
4735         SET_MODULE_OWNER(dev);
4736         SET_NETDEV_DEV(dev, &pdev->dev);
4737
4738         /*  Private member variable initialized to s2io NIC structure */
4739         sp = dev->priv;
4740         memset(sp, 0, sizeof(nic_t));
4741         sp->dev = dev;
4742         sp->pdev = pdev;
4743         sp->high_dma_flag = dma_flag;
4744         sp->device_enabled_once = FALSE;
4745
4746         /* Initialize some PCI/PCI-X fields of the NIC. */
4747         s2io_init_pci(sp);
4748
4749         /*
4750          * Setting the device configuration parameters.
4751          * Most of these parameters can be specified by the user during
4752          * module insertion as they are module loadable parameters. If
4753          * these parameters are not specified during load time, they
4754          * are initialized with default values.
4755          */
4756         mac_control = &sp->mac_control;
4757         config = &sp->config;
4758
4759         /* Tx side parameters. */
4760         tx_fifo_len[0] = DEFAULT_FIFO_LEN;      /* Default value. */
4761         config->tx_fifo_num = tx_fifo_num;
4762         for (i = 0; i < MAX_TX_FIFOS; i++) {
4763                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4764                 config->tx_cfg[i].fifo_priority = i;
4765         }
4766
4767         /* mapping the QoS priority to the configured fifos */
4768         for (i = 0; i < MAX_TX_FIFOS; i++)
4769                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
4770
4771         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4772         for (i = 0; i < config->tx_fifo_num; i++) {
4773                 config->tx_cfg[i].f_no_snoop =
4774                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4775                 if (config->tx_cfg[i].fifo_len < 65) {
4776                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4777                         break;
4778                 }
4779         }
4780         config->max_txds = MAX_SKB_FRAGS;
4781
4782         /* Rx side parameters. */
4783         rx_ring_sz[0] = SMALL_BLK_CNT;  /* Default value. */
4784         config->rx_ring_num = rx_ring_num;
4785         for (i = 0; i < MAX_RX_RINGS; i++) {
4786                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4787                     (MAX_RXDS_PER_BLOCK + 1);
4788                 config->rx_cfg[i].ring_priority = i;
4789         }
4790
4791         for (i = 0; i < rx_ring_num; i++) {
4792                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4793                 config->rx_cfg[i].f_no_snoop =
4794                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4795         }
4796
4797         /*  Setting Mac Control parameters */
4798         mac_control->rmac_pause_time = rmac_pause_time;
4799         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4800         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4801
4802
4803         /* Initialize Ring buffer parameters. */
4804         for (i = 0; i < config->rx_ring_num; i++)
4805                 atomic_set(&sp->rx_bufs_left[i], 0);
4806
4807         /*  initialize the shared memory used by the NIC and the host */
4808         if (init_shared_mem(sp)) {
4809                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4810                           dev->name);
4811                 ret = -ENOMEM;
4812                 goto mem_alloc_failed;
4813         }
4814
4815         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4816                                      pci_resource_len(pdev, 0));
4817         if (!sp->bar0) {
4818                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4819                           dev->name);
4820                 ret = -ENOMEM;
4821                 goto bar0_remap_failed;
4822         }
4823
4824         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4825                                      pci_resource_len(pdev, 2));
4826         if (!sp->bar1) {
4827                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4828                           dev->name);
4829                 ret = -ENOMEM;
4830                 goto bar1_remap_failed;
4831         }
4832
4833         dev->irq = pdev->irq;
4834         dev->base_addr = (unsigned long) sp->bar0;
4835
4836         /* Initializing the BAR1 address as the start of the FIFO pointer. */
4837         for (j = 0; j < MAX_TX_FIFOS; j++) {
4838                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4839                     (sp->bar1 + (j * 0x00020000));
4840         }
4841
4842         /*  Driver entry points */
4843         dev->open = &s2io_open;
4844         dev->stop = &s2io_close;
4845         dev->hard_start_xmit = &s2io_xmit;
4846         dev->get_stats = &s2io_get_stats;
4847         dev->set_multicast_list = &s2io_set_multicast;
4848         dev->do_ioctl = &s2io_ioctl;
4849         dev->change_mtu = &s2io_change_mtu;
4850         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4851
4852         /*
4853          * will use eth_mac_addr() for  dev->set_mac_address
4854          * mac address will be set every time dev->open() is called
4855          */
4856 #if defined(CONFIG_S2IO_NAPI)
4857         dev->poll = s2io_poll;
4858         dev->weight = 32;
4859 #endif
4860
4861         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4862         if (sp->high_dma_flag == TRUE)
4863                 dev->features |= NETIF_F_HIGHDMA;
4864 #ifdef NETIF_F_TSO
4865         dev->features |= NETIF_F_TSO;
4866 #endif
4867
4868         dev->tx_timeout = &s2io_tx_watchdog;
4869         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4870         INIT_WORK(&sp->rst_timer_task,
4871                   (void (*)(void *)) s2io_restart_nic, dev);
4872         INIT_WORK(&sp->set_link_task,
4873                   (void (*)(void *)) s2io_set_link, sp);
4874
4875         pci_save_state(sp->pdev);
4876
4877         /* Setting swapper control on the NIC, for proper reset operation */
4878         if (s2io_set_swapper(sp)) {
4879                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4880                           dev->name);
4881                 ret = -EAGAIN;
4882                 goto set_swap_failed;
4883         }
4884
4885         /*
4886          * Fix for all "FFs" MAC address problems observed on
4887          * Alpha platforms
4888          */
4889         fix_mac_address(sp);
4890         s2io_reset(sp);
4891
4892         /*
4893          * MAC address initialization.
4894          * For now only one mac address will be read and used.
4895          */
4896         bar0 = sp->bar0;
4897         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4898             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4899         writeq(val64, &bar0->rmac_addr_cmd_mem);
4900         wait_for_cmd_complete(sp);
4901
4902         tmp64 = readq(&bar0->rmac_addr_data0_mem);
4903         mac_down = (u32) tmp64;
4904         mac_up = (u32) (tmp64 >> 32);
4905
4906         memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4907
4908         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4909         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4910         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4911         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4912         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4913         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4914
4915         DBG_PRINT(INIT_DBG,
4916                   "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4917                   sp->def_mac_addr[0].mac_addr[0],
4918                   sp->def_mac_addr[0].mac_addr[1],
4919                   sp->def_mac_addr[0].mac_addr[2],
4920                   sp->def_mac_addr[0].mac_addr[3],
4921                   sp->def_mac_addr[0].mac_addr[4],
4922                   sp->def_mac_addr[0].mac_addr[5]);
4923
4924         /*  Set the factory defined MAC address initially   */
4925         dev->addr_len = ETH_ALEN;
4926         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4927
4928         /*
4929          * Initialize the tasklet status and link state flags
4930          * and the card state parameter
4931          */
4932         atomic_set(&(sp->card_state), 0);
4933         sp->tasklet_status = 0;
4934         sp->link_state = 0;
4935
4936         /* Initialize spinlocks */
4937         spin_lock_init(&sp->tx_lock);
4938 #ifndef CONFIG_S2IO_NAPI
4939         spin_lock_init(&sp->put_lock);
4940 #endif
4941
4942         /*
4943          * SXE-002: Configure link and activity LED to init state
4944          * on driver load.
4945          */
4946         subid = sp->pdev->subsystem_device;
4947         if ((subid & 0xFF) >= 0x07) {
4948                 val64 = readq(&bar0->gpio_control);
4949                 val64 |= 0x0000800000000000ULL;
4950                 writeq(val64, &bar0->gpio_control);
4951                 val64 = 0x0411040400000000ULL;
4952                 writeq(val64, (void __iomem *) bar0 + 0x2700);
4953                 val64 = readq(&bar0->gpio_control);
4954         }
4955
4956         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
4957
4958         if (register_netdev(dev)) {
4959                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4960                 ret = -ENODEV;
4961                 goto register_failed;
4962         }
4963
4964         /*
4965          * Make Link state as off at this point, when the Link change
4966          * interrupt comes the state will be automatically changed to
4967          * the right state.
4968          */
4969         netif_carrier_off(dev);
4970         sp->last_link_state = LINK_DOWN;
4971
4972         return 0;
4973
4974       register_failed:
4975       set_swap_failed:
4976         iounmap(sp->bar1);
4977       bar1_remap_failed:
4978         iounmap(sp->bar0);
4979       bar0_remap_failed:
4980       mem_alloc_failed:
4981         free_shared_mem(sp);
4982         pci_disable_device(pdev);
4983         pci_release_regions(pdev);
4984         pci_set_drvdata(pdev, NULL);
4985         free_netdev(dev);
4986
4987         return ret;
4988 }
4989
4990 /**
4991  * s2io_rem_nic - Free the PCI device
4992  * @pdev: structure containing the PCI related information of the device.
4993  * Description: This function is called by the Pci subsystem to release a
4994  * PCI device and free up all resource held up by the device. This could
4995  * be in response to a Hot plug event or when the driver is to be removed
4996  * from memory.
4997  */
4998
4999 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5000 {
5001         struct net_device *dev =
5002             (struct net_device *) pci_get_drvdata(pdev);
5003         nic_t *sp;
5004
5005         if (dev == NULL) {
5006                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5007                 return;
5008         }
5009
5010         sp = dev->priv;
5011         unregister_netdev(dev);
5012
5013         free_shared_mem(sp);
5014         iounmap(sp->bar0);
5015         iounmap(sp->bar1);
5016         pci_disable_device(pdev);
5017         pci_release_regions(pdev);
5018         pci_set_drvdata(pdev, NULL);
5019         free_netdev(dev);
5020 }
5021
5022 /**
5023  * s2io_starter - Entry point for the driver
5024  * Description: This function is the entry point for the driver. It verifies
5025  * the module loadable parameters and initializes PCI configuration space.
5026  */
5027
5028 int __init s2io_starter(void)
5029 {
5030         return pci_module_init(&s2io_driver);
5031 }
5032
5033 /**
5034  * s2io_closer - Cleanup routine for the driver
5035  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5036  */
5037
5038 void s2io_closer(void)
5039 {
5040         pci_unregister_driver(&s2io_driver);
5041         DBG_PRINT(INIT_DBG, "cleanup done\n");
5042 }
5043
5044 module_init(s2io_starter);
5045 module_exit(s2io_closer);