1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code parts that were
22 * deprecated and also for style-related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
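 * For example (illustrative values only), loading the driver with
 * "modprobe s2io tx_fifo_num=2 rx_ring_num=2" would request two Tx
 * FIFOs and two Rx rings.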
37 ************************************************************************/
38
39#include <linux/config.h>
40#include <linux/module.h>
41#include <linux/types.h>
42#include <linux/errno.h>
43#include <linux/ioport.h>
44#include <linux/pci.h>
45#include <linux/dma-mapping.h>
46#include <linux/kernel.h>
47#include <linux/netdevice.h>
48#include <linux/etherdevice.h>
49#include <linux/skbuff.h>
50#include <linux/init.h>
51#include <linux/delay.h>
52#include <linux/stddef.h>
53#include <linux/ioctl.h>
54#include <linux/timex.h>
55#include <linux/sched.h>
56#include <linux/ethtool.h>
57#include <linux/version.h>
58#include <linux/workqueue.h>
59
60#include <asm/io.h>
61#include <asm/system.h>
62#include <asm/uaccess.h>
63
64/* local include */
65#include "s2io.h"
66#include "s2io-regs.h"
67
68/* S2io Driver name & version. */
69static char s2io_driver_name[] = "s2io";
70static char s2io_driver_version[] = "Version 1.7.7.1";
71
72/*
73 * Cards with the following subsystem_ids have a link state indication
74 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
75 * The macro below identifies these cards given the subsystem_id.
76 */
77#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
78 (((subid >= 0x600B) && (subid <= 0x600D)) || \
79 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
80
81#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
82 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
83#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
84#define PANIC 1
85#define LOW 2
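/*
 * rx_buffer_level - hint on how depleted a receive ring is.
 * (sp->pkt_cnt[ring] - rxb_size) is the number of RxDs that currently
 * have no buffer attached.  Once more than 16 are empty the level is
 * reported as LOW, and while that count is still below
 * MAX_RXDS_PER_BLOCK it is reported as PANIC instead.
 */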
86static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
87{
88 int level = 0;
89 if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
90 level = LOW;
91 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
92 level = PANIC;
93 }
94 }
95
96 return level;
97}
98
99/* Ethtool related variables and Macros. */
100static char s2io_gstrings[][ETH_GSTRING_LEN] = {
101 "Register test\t(offline)",
102 "Eeprom test\t(offline)",
103 "Link test\t(online)",
104 "RLDRAM test\t(offline)",
105 "BIST Test\t(offline)"
106};
107
108static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
109 {"tmac_frms"},
110 {"tmac_data_octets"},
111 {"tmac_drop_frms"},
112 {"tmac_mcst_frms"},
113 {"tmac_bcst_frms"},
114 {"tmac_pause_ctrl_frms"},
115 {"tmac_any_err_frms"},
116 {"tmac_vld_ip_octets"},
117 {"tmac_vld_ip"},
118 {"tmac_drop_ip"},
119 {"tmac_icmp"},
120 {"tmac_rst_tcp"},
121 {"tmac_tcp"},
122 {"tmac_udp"},
123 {"rmac_vld_frms"},
124 {"rmac_data_octets"},
125 {"rmac_fcs_err_frms"},
126 {"rmac_drop_frms"},
127 {"rmac_vld_mcst_frms"},
128 {"rmac_vld_bcst_frms"},
129 {"rmac_in_rng_len_err_frms"},
130 {"rmac_long_frms"},
131 {"rmac_pause_ctrl_frms"},
132 {"rmac_discarded_frms"},
133 {"rmac_usized_frms"},
134 {"rmac_osized_frms"},
135 {"rmac_frag_frms"},
136 {"rmac_jabber_frms"},
137 {"rmac_ip"},
138 {"rmac_ip_octets"},
139 {"rmac_hdr_err_ip"},
140 {"rmac_drop_ip"},
141 {"rmac_icmp"},
142 {"rmac_tcp"},
143 {"rmac_udp"},
144 {"rmac_err_drp_udp"},
145 {"rmac_pause_cnt"},
146 {"rmac_accepted_ip"},
147 {"rmac_err_tcp"},
148};
149
150#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
151#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
152
153#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
154#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
155
156
157/*
158 * Constants to be programmed into the Xena's registers, to configure
159 * the XAUI.
160 */
161
162#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
163#define END_SIGN 0x0
164
165static u64 default_mdio_cfg[] = {
166 /* Reset PMA PLL */
167 0xC001010000000000ULL, 0xC0010100000000E0ULL,
168 0xC0010100008000E4ULL,
169 /* Remove Reset from PMA PLL */
170 0xC001010000000000ULL, 0xC0010100000000E0ULL,
171 0xC0010100000000E4ULL,
172 END_SIGN
173};
174
175static u64 default_dtx_cfg[] = {
176 0x8000051500000000ULL, 0x80000515000000E0ULL,
177 0x80000515D93500E4ULL, 0x8001051500000000ULL,
178 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
179 0x8002051500000000ULL, 0x80020515000000E0ULL,
180 0x80020515F21000E4ULL,
181 /* Set PADLOOPBACKN */
182 0x8002051500000000ULL, 0x80020515000000E0ULL,
183 0x80020515B20000E4ULL, 0x8003051500000000ULL,
184 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
185 0x8004051500000000ULL, 0x80040515000000E0ULL,
186 0x80040515B20000E4ULL, 0x8005051500000000ULL,
187 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
188 SWITCH_SIGN,
189 /* Remove PADLOOPBACKN */
190 0x8002051500000000ULL, 0x80020515000000E0ULL,
191 0x80020515F20000E4ULL, 0x8003051500000000ULL,
192 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
193 0x8004051500000000ULL, 0x80040515000000E0ULL,
194 0x80040515F20000E4ULL, 0x8005051500000000ULL,
195 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
196 END_SIGN
197};
198
199
200/*
201 * Constants for Fixing the MacAddress problem seen mostly on
202 * Alpha machines.
203 */
204static u64 fix_mac[] = {
205 0x0060000000000000ULL, 0x0060600000000000ULL,
206 0x0040600000000000ULL, 0x0000600000000000ULL,
207 0x0020600000000000ULL, 0x0060600000000000ULL,
208 0x0020600000000000ULL, 0x0060600000000000ULL,
209 0x0020600000000000ULL, 0x0060600000000000ULL,
210 0x0020600000000000ULL, 0x0060600000000000ULL,
211 0x0020600000000000ULL, 0x0060600000000000ULL,
212 0x0020600000000000ULL, 0x0060600000000000ULL,
213 0x0020600000000000ULL, 0x0060600000000000ULL,
214 0x0020600000000000ULL, 0x0060600000000000ULL,
215 0x0020600000000000ULL, 0x0060600000000000ULL,
216 0x0020600000000000ULL, 0x0060600000000000ULL,
217 0x0020600000000000ULL, 0x0000600000000000ULL,
218 0x0040600000000000ULL, 0x0060600000000000ULL,
219 END_SIGN
220};
221
222/* Module Loadable parameters. */
223static unsigned int tx_fifo_num = 1;
224static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
225 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
226static unsigned int rx_ring_num = 1;
227static unsigned int rx_ring_sz[MAX_RX_RINGS] =
228 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
229static unsigned int Stats_refresh_time = 4;
230static unsigned int rmac_pause_time = 65535;
231static unsigned int mc_pause_threshold_q0q3 = 187;
232static unsigned int mc_pause_threshold_q4q7 = 187;
233static unsigned int shared_splits;
234static unsigned int tmac_util_period = 5;
235static unsigned int rmac_util_period = 5;
236#ifndef CONFIG_S2IO_NAPI
237static unsigned int indicate_max_pkts;
238#endif
239
240/*
241 * S2IO device table.
242 * This table lists all the devices that this driver supports.
243 */
244static struct pci_device_id s2io_tbl[] __devinitdata = {
245 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
246 PCI_ANY_ID, PCI_ANY_ID},
247 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
248 PCI_ANY_ID, PCI_ANY_ID},
249 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
250 PCI_ANY_ID, PCI_ANY_ID},
251 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
252 PCI_ANY_ID, PCI_ANY_ID},
253 {0,}
254};
255
256MODULE_DEVICE_TABLE(pci, s2io_tbl);
257
258static struct pci_driver s2io_driver = {
259 .name = "S2IO",
260 .id_table = s2io_tbl,
261 .probe = s2io_init_nic,
262 .remove = __devexit_p(s2io_rem_nic),
263};
264
265/* A simplifier macro used both by init and free shared_mem Fns(). */
266#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
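/*
 * Worked example (illustrative numbers only): if each PAGE_SIZE chunk
 * holds 16 TxD lists, a FIFO of 1000 lists needs
 * TXD_MEM_PAGE_CNT(1000, 16) = (1000 + 15) / 16 = 63 pages.
 */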
267
268/**
269 * init_shared_mem - Allocation and Initialization of Memory
270 * @nic: Device private variable.
271 * Description: The function allocates all the memory areas shared
272 * between the NIC and the driver. This includes Tx descriptors,
273 * Rx descriptors and the statistics block.
274 */
275
276static int init_shared_mem(struct s2io_nic *nic)
277{
278 u32 size;
279 void *tmp_v_addr, *tmp_v_addr_next;
280 dma_addr_t tmp_p_addr, tmp_p_addr_next;
281 RxD_block_t *pre_rxd_blk = NULL;
282 int i, j, blk_cnt;
283 int lst_size, lst_per_page;
284 struct net_device *dev = nic->dev;
285#ifdef CONFIG_2BUFF_MODE
286 unsigned long tmp;
287 buffAdd_t *ba;
288#endif
289
290 mac_info_t *mac_control;
291 struct config_param *config;
292
293 mac_control = &nic->mac_control;
294 config = &nic->config;
295
296
297 /* Allocation and initialization of TXDLs in FIFOs */
298 size = 0;
299 for (i = 0; i < config->tx_fifo_num; i++) {
300 size += config->tx_cfg[i].fifo_len;
301 }
302 if (size > MAX_AVAILABLE_TXDS) {
303 DBG_PRINT(ERR_DBG, "%s: Total number of Tx descriptors ",
304 dev->name);
305 DBG_PRINT(ERR_DBG, "requested exceeds the maximum value ");
306 DBG_PRINT(ERR_DBG, "that can be used\n");
307 return FAILURE;
308 }
309
310 lst_size = (sizeof(TxD_t) * config->max_txds);
311 lst_per_page = PAGE_SIZE / lst_size;
312
313 for (i = 0; i < config->tx_fifo_num; i++) {
314 int fifo_len = config->tx_cfg[i].fifo_len;
315 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
316 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
317 if (!nic->list_info[i]) {
318 DBG_PRINT(ERR_DBG,
319 "Malloc failed for list_info\n");
320 return -ENOMEM;
321 }
322 memset(nic->list_info[i], 0, list_holder_size);
323 }
324 for (i = 0; i < config->tx_fifo_num; i++) {
325 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
326 lst_per_page);
327 mac_control->tx_curr_put_info[i].offset = 0;
328 mac_control->tx_curr_put_info[i].fifo_len =
329 config->tx_cfg[i].fifo_len - 1;
330 mac_control->tx_curr_get_info[i].offset = 0;
331 mac_control->tx_curr_get_info[i].fifo_len =
332 config->tx_cfg[i].fifo_len - 1;
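 /*
 * Carve each DMA-consistent page allocated below into lst_per_page
 * TxD lists, recording the virtual and bus address of list 'l' in
 * list_info[i][l].
 */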
333 for (j = 0; j < page_num; j++) {
334 int k = 0;
335 dma_addr_t tmp_p;
336 void *tmp_v;
337 tmp_v = pci_alloc_consistent(nic->pdev,
338 PAGE_SIZE, &tmp_p);
339 if (!tmp_v) {
340 DBG_PRINT(ERR_DBG,
341 "pci_alloc_consistent ");
342 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
343 return -ENOMEM;
344 }
345 while (k < lst_per_page) {
346 int l = (j * lst_per_page) + k;
347 if (l == config->tx_cfg[i].fifo_len)
348 goto end_txd_alloc;
349 nic->list_info[i][l].list_virt_addr =
350 tmp_v + (k * lst_size);
351 nic->list_info[i][l].list_phy_addr =
352 tmp_p + (k * lst_size);
353 k++;
354 }
355 }
356 }
357 end_txd_alloc:
358
359 /* Allocation and initialization of RXDs in Rings */
360 size = 0;
361 for (i = 0; i < config->rx_ring_num; i++) {
362 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
363 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
364 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
365 i);
366 DBG_PRINT(ERR_DBG, "RxDs per Block");
367 return FAILURE;
368 }
369 size += config->rx_cfg[i].num_rxd;
370 nic->block_count[i] =
371 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
372 nic->pkt_cnt[i] =
373 config->rx_cfg[i].num_rxd - nic->block_count[i];
374 }
375
376 for (i = 0; i < config->rx_ring_num; i++) {
377 mac_control->rx_curr_get_info[i].block_index = 0;
378 mac_control->rx_curr_get_info[i].offset = 0;
379 mac_control->rx_curr_get_info[i].ring_len =
380 config->rx_cfg[i].num_rxd - 1;
381 mac_control->rx_curr_put_info[i].block_index = 0;
382 mac_control->rx_curr_put_info[i].offset = 0;
383 mac_control->rx_curr_put_info[i].ring_len =
384 config->rx_cfg[i].num_rxd - 1;
385 blk_cnt =
386 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
387 /* Allocating all the Rx blocks */
388 for (j = 0; j < blk_cnt; j++) {
389#ifndef CONFIG_2BUFF_MODE
390 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
391#else
392 size = SIZE_OF_BLOCK;
393#endif
394 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
395 &tmp_p_addr);
396 if (tmp_v_addr == NULL) {
397 /*
398 * In case of failure, free_shared_mem()
399 * is called, which should free any
400 * memory that was alloced till the
401 * failure happened.
402 */
403 nic->rx_blocks[i][j].block_virt_addr =
404 tmp_v_addr;
405 return -ENOMEM;
406 }
407 memset(tmp_v_addr, 0, size);
408 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
409 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
410 }
411 /* Interlinking all Rx Blocks */
412 for (j = 0; j < blk_cnt; j++) {
413 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
414 tmp_v_addr_next =
415 nic->rx_blocks[i][(j + 1) %
416 blk_cnt].block_virt_addr;
417 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
418 tmp_p_addr_next =
419 nic->rx_blocks[i][(j + 1) %
420 blk_cnt].block_dma_addr;
421
422 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
423 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
424 * marker.
425 */
426#ifndef CONFIG_2BUFF_MODE
427 pre_rxd_blk->reserved_2_pNext_RxD_block =
428 (unsigned long) tmp_v_addr_next;
429#endif
430 pre_rxd_blk->pNext_RxD_Blk_physical =
431 (u64) tmp_p_addr_next;
432 }
433 }
434
435#ifdef CONFIG_2BUFF_MODE
436 /*
437 * Allocation of storage for buffer addresses in 2BUFF mode
438 * and the buffers as well.
439 */
440 for (i = 0; i < config->rx_ring_num; i++) {
441 blk_cnt =
442 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
444 GFP_KERNEL);
445 if (!nic->ba[i])
446 return -ENOMEM;
447 for (j = 0; j < blk_cnt; j++) {
448 int k = 0;
449 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
450 (MAX_RXDS_PER_BLOCK + 1)),
451 GFP_KERNEL);
452 if (!nic->ba[i][j])
453 return -ENOMEM;
454 while (k != MAX_RXDS_PER_BLOCK) {
455 ba = &nic->ba[i][j][k];
456
457 ba->ba_0_org = kmalloc
458 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
459 if (!ba->ba_0_org)
460 return -ENOMEM;
461 tmp = (unsigned long) ba->ba_0_org;
462 tmp += ALIGN_SIZE;
463 tmp &= ~((unsigned long) ALIGN_SIZE);
464 ba->ba_0 = (void *) tmp;
465
466 ba->ba_1_org = kmalloc
467 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
468 if (!ba->ba_1_org)
469 return -ENOMEM;
470 tmp = (unsigned long) ba->ba_1_org;
471 tmp += ALIGN_SIZE;
472 tmp &= ~((unsigned long) ALIGN_SIZE);
473 ba->ba_1 = (void *) tmp;
474 k++;
475 }
476 }
477 }
478#endif
479
480 /* Allocation and initialization of Statistics block */
481 size = sizeof(StatInfo_t);
482 mac_control->stats_mem = pci_alloc_consistent
483 (nic->pdev, size, &mac_control->stats_mem_phy);
484
485 if (!mac_control->stats_mem) {
486 /*
487 * In case of failure, free_shared_mem() is called, which
488 * should free any memory that was alloced till the
489 * failure happened.
490 */
491 return -ENOMEM;
492 }
493 mac_control->stats_mem_sz = size;
494
495 tmp_v_addr = mac_control->stats_mem;
496 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
497 memset(tmp_v_addr, 0, size);
498
499 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
500 (unsigned long long) tmp_p_addr);
501
502 return SUCCESS;
503}
504
505/**
506 * free_shared_mem - Free the allocated Memory
507 * @nic: Device private variable.
508 * Description: This function frees all memory allocated by the
509 * init_shared_mem() function and returns it to the kernel.
510 */
511
512static void free_shared_mem(struct s2io_nic *nic)
513{
514 int i, j, blk_cnt, size;
515 void *tmp_v_addr;
516 dma_addr_t tmp_p_addr;
517 mac_info_t *mac_control;
518 struct config_param *config;
519 int lst_size, lst_per_page;
520
521
522 if (!nic)
523 return;
524
525 mac_control = &nic->mac_control;
526 config = &nic->config;
527
528 lst_size = (sizeof(TxD_t) * config->max_txds);
529 lst_per_page = PAGE_SIZE / lst_size;
530
531 for (i = 0; i < config->tx_fifo_num; i++) {
532 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
533 lst_per_page);
534 for (j = 0; j < page_num; j++) {
535 int mem_blks = (j * lst_per_page);
536 if (!nic->list_info[i][mem_blks].list_virt_addr)
537 break;
538 pci_free_consistent(nic->pdev, PAGE_SIZE,
539 nic->list_info[i][mem_blks].
540 list_virt_addr,
541 nic->list_info[i][mem_blks].
542 list_phy_addr);
543 }
544 kfree(nic->list_info[i]);
545 }
546
547#ifndef CONFIG_2BUFF_MODE
548 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
549#else
550 size = SIZE_OF_BLOCK;
551#endif
552 for (i = 0; i < config->rx_ring_num; i++) {
553 blk_cnt = nic->block_count[i];
554 for (j = 0; j < blk_cnt; j++) {
555 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
556 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
557 if (tmp_v_addr == NULL)
558 break;
559 pci_free_consistent(nic->pdev, size,
560 tmp_v_addr, tmp_p_addr);
561 }
562 }
563
564#ifdef CONFIG_2BUFF_MODE
565 /* Freeing buffer storage addresses in 2BUFF mode. */
566 for (i = 0; i < config->rx_ring_num; i++) {
567 blk_cnt =
568 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
569 if (!nic->ba[i])
570 goto end_free;
571 for (j = 0; j < blk_cnt; j++) {
572 int k = 0;
573 if (!nic->ba[i][j]) {
574 kfree(nic->ba[i]);
575 goto end_free;
576 }
577 while (k != MAX_RXDS_PER_BLOCK) {
578 buffAdd_t *ba = &nic->ba[i][j][k];
579 if (!ba->ba_0_org || !ba->ba_1_org) {
580 /* Partially set up entry: free whatever
581 * exists (kfree(NULL) is a no-op) before
582 * the arrays that contain it, then bail. */
583 kfree(ba->ba_0_org);
584 kfree(ba->ba_1_org);
585 kfree(nic->ba[i][j]);
586 kfree(nic->ba[i]);
587 goto end_free;
588 }
589 kfree(ba->ba_0_org);
590 kfree(ba->ba_1_org);
591 k++;
592 }
593 kfree(nic->ba[i][j]);
594 }
595 kfree(nic->ba[i]);
596 }
597end_free:
598#endif
599
600 if (mac_control->stats_mem) {
601 pci_free_consistent(nic->pdev,
602 mac_control->stats_mem_sz,
603 mac_control->stats_mem,
604 mac_control->stats_mem_phy);
605 }
606}
607
608/**
609 * init_nic - Initialization of hardware
610 * @nic: device private variable
611 * Description: The function sequentially configures every block
612 * of the H/W from their reset values.
613 * Return Value: SUCCESS on success and
614 * '-1' on failure (endian settings incorrect).
615 */
616
617static int init_nic(struct s2io_nic *nic)
618{
619 XENA_dev_config_t __iomem *bar0 = nic->bar0;
620 struct net_device *dev = nic->dev;
621 register u64 val64 = 0;
622 void __iomem *add;
623 u32 time;
624 int i, j;
625 mac_info_t *mac_control;
626 struct config_param *config;
627 int mdio_cnt = 0, dtx_cnt = 0;
628 unsigned long long mem_share;
629
630 mac_control = &nic->mac_control;
631 config = &nic->config;
632
633 /* Initialize swapper control register */
634 if (s2io_set_swapper(nic)) {
635 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
636 return -1;
637 }
638
639 /* Remove XGXS from reset state */
640 val64 = 0;
641 writeq(val64, &bar0->sw_reset);
642 val64 = readq(&bar0->sw_reset);
643 msleep(500);
644
645 /* Enable Receiving broadcasts */
646 add = &bar0->mac_cfg;
647 val64 = readq(&bar0->mac_cfg);
648 val64 |= MAC_RMAC_BCAST_ENABLE;
649 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
650 writel((u32) val64, add);
651 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
652 writel((u32) (val64 >> 32), (add + 4));
653
654 /* Read registers in all blocks */
655 val64 = readq(&bar0->mac_int_mask);
656 val64 = readq(&bar0->mc_int_mask);
657 val64 = readq(&bar0->xgxs_int_mask);
658
659 /* Set MTU */
660 val64 = dev->mtu;
661 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
662
663 /*
664 * Configuring the XAUI Interface of Xena.
665 * ***************************************
666 * To configure the Xena's XAUI, one has to write a series
667 * of 64 bit values into two registers in a particular
668 * sequence. Hence the macro 'SWITCH_SIGN' has been defined;
669 * it is placed in the arrays of configuration values
670 * (default_dtx_cfg & default_mdio_cfg) at the appropriate places
671 * to switch writing from one register to the other. We continue
672 * writing these values until we encounter the 'END_SIGN' macro.
673 * For example, after making a series of 21 writes into the
674 * dtx_control register, 'SWITCH_SIGN' appears and hence we
675 * start writing into mdio_control until we encounter END_SIGN.
676 */
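 /*
 * Note: both tables end with END_SIGN and SWITCH_SIGN only flips
 * between them, so this loop terminates once both cursors reach
 * their END_SIGN entries.
 */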
677 while (1) {
678 dtx_cfg:
679 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
680 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
681 dtx_cnt++;
682 goto mdio_cfg;
683 }
684 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
685 &bar0->dtx_control, UF);
686 val64 = readq(&bar0->dtx_control);
687 dtx_cnt++;
688 }
689 mdio_cfg:
690 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
691 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
692 mdio_cnt++;
693 goto dtx_cfg;
694 }
695 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
696 &bar0->mdio_control, UF);
697 val64 = readq(&bar0->mdio_control);
698 mdio_cnt++;
699 }
700 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
701 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
702 break;
703 } else {
704 goto dtx_cfg;
705 }
706 }
707
708 /* Tx DMA Initialization */
709 val64 = 0;
710 writeq(val64, &bar0->tx_fifo_partition_0);
711 writeq(val64, &bar0->tx_fifo_partition_1);
712 writeq(val64, &bar0->tx_fifo_partition_2);
713 writeq(val64, &bar0->tx_fifo_partition_3);
714
715
716 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
717 val64 |=
718 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
719 13) | vBIT(config->tx_cfg[i].fifo_priority,
720 ((i * 32) + 5), 3);
721
722 if (i == (config->tx_fifo_num - 1)) {
723 if (i % 2 == 0)
724 i++;
725 }
726
727 switch (i) {
728 case 1:
729 writeq(val64, &bar0->tx_fifo_partition_0);
730 val64 = 0;
731 break;
732 case 3:
733 writeq(val64, &bar0->tx_fifo_partition_1);
734 val64 = 0;
735 break;
736 case 5:
737 writeq(val64, &bar0->tx_fifo_partition_2);
738 val64 = 0;
739 break;
740 case 7:
741 writeq(val64, &bar0->tx_fifo_partition_3);
742 break;
743 }
744 }
745
746 /* Enable Tx FIFO partition 0. */
747 val64 = readq(&bar0->tx_fifo_partition_0);
748 val64 |= BIT(0); /* To enable the FIFO partition. */
749 writeq(val64, &bar0->tx_fifo_partition_0);
750
751 val64 = readq(&bar0->tx_fifo_partition_0);
752 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
753 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
754
755 /*
756 * Initialization of Tx_PA_CONFIG register to ignore packet
757 * integrity checking.
758 */
759 val64 = readq(&bar0->tx_pa_cfg);
760 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
761 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
762 writeq(val64, &bar0->tx_pa_cfg);
763
764 /* Rx DMA initialization. */
765 val64 = 0;
766 for (i = 0; i < config->rx_ring_num; i++) {
767 val64 |=
768 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
769 3);
770 }
771 writeq(val64, &bar0->rx_queue_priority);
772
773 /*
774 * Allocating equal share of memory to all the
775 * configured Rings.
776 */
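 /*
 * Example (illustrative): with rx_ring_num == 3, queue 0 gets
 * 64/3 + 64%3 = 22 of the 64 memory blocks (it absorbs the
 * remainder) while queues 1 and 2 get 21 each.
 */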
777 val64 = 0;
778 for (i = 0; i < config->rx_ring_num; i++) {
779 switch (i) {
780 case 0:
781 mem_share = (64 / config->rx_ring_num +
782 64 % config->rx_ring_num);
783 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
784 continue;
785 case 1:
786 mem_share = (64 / config->rx_ring_num);
787 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
788 continue;
789 case 2:
790 mem_share = (64 / config->rx_ring_num);
791 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
792 continue;
793 case 3:
794 mem_share = (64 / config->rx_ring_num);
795 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
796 continue;
797 case 4:
798 mem_share = (64 / config->rx_ring_num);
799 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
800 continue;
801 case 5:
802 mem_share = (64 / config->rx_ring_num);
803 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
804 continue;
805 case 6:
806 mem_share = (64 / config->rx_ring_num);
807 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
808 continue;
809 case 7:
810 mem_share = (64 / config->rx_ring_num);
811 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
812 continue;
813 }
814 }
815 writeq(val64, &bar0->rx_queue_cfg);
816
817 /*
818 * Initializing the Tx round robin registers to 0.
819 * Filling Tx and Rx round robin registers as per the
820 * number of FIFOs and Rings is still TODO.
821 */
822 writeq(0, &bar0->tx_w_round_robin_0);
823 writeq(0, &bar0->tx_w_round_robin_1);
824 writeq(0, &bar0->tx_w_round_robin_2);
825 writeq(0, &bar0->tx_w_round_robin_3);
826 writeq(0, &bar0->tx_w_round_robin_4);
827
828 /*
829 * TODO
830 * Disable Rx steering. Hard coding all packets to be steered to
831 * Queue 0 for now.
832 */
833 val64 = 0x8080808080808080ULL;
834 writeq(val64, &bar0->rts_qos_steering);
835
836 /* UDP Fix */
837 val64 = 0;
838 for (i = 1; i < 8; i++)
839 writeq(val64, &bar0->rts_frm_len_n[i]);
840
841 /* Set the rts_frm_len register for ring 0 */
842 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
843 &bar0->rts_frm_len_n[0]);
844
845 /* Enable statistics */
846 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
847 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
848 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
849 writeq(val64, &bar0->stat_cfg);
850
851 /*
852 * Initializing the sampling rate for the device to calculate the
853 * bandwidth utilization.
854 */
855 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
856 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
857 writeq(val64, &bar0->mac_link_util);
858
859
860 /*
861 * Initializing the Transmit and Receive Traffic Interrupt
862 * Scheme.
863 */
864 /* TTI Initialization. Default Tx timer gets us about
865 * 250 interrupts per sec. Continuous interrupts are enabled
866 * by default.
867 */
868 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
869 TTI_DATA1_MEM_TX_URNG_A(0xA) |
870 TTI_DATA1_MEM_TX_URNG_B(0x10) |
871 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
872 TTI_DATA1_MEM_TX_TIMER_CI_EN;
873 writeq(val64, &bar0->tti_data1_mem);
874
875 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
876 TTI_DATA2_MEM_TX_UFC_B(0x20) |
877 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
878 writeq(val64, &bar0->tti_data2_mem);
879
880 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
881 writeq(val64, &bar0->tti_command_mem);
882
883 /*
884 * Once the operation completes, the Strobe bit of the command
885 * register will be reset. We poll for this particular condition
886 * We wait for a maximum of 500ms for the operation to complete,
887 * if it's not complete by then we return error.
888 */
889 time = 0;
890 while (TRUE) {
891 val64 = readq(&bar0->tti_command_mem);
892 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
893 break;
894 }
895 if (time > 10) {
896 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
897 dev->name);
898 return -1;
899 }
900 msleep(50);
901 time++;
902 }
903
904 /* RTI Initialization */
905 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
906 RTI_DATA1_MEM_RX_URNG_A(0xA) |
907 RTI_DATA1_MEM_RX_URNG_B(0x10) |
908 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
909
910 writeq(val64, &bar0->rti_data1_mem);
911
912 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
913 RTI_DATA2_MEM_RX_UFC_B(0x2) |
914 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
915 writeq(val64, &bar0->rti_data2_mem);
916
917 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
918 writeq(val64, &bar0->rti_command_mem);
919
920 /*
921 * Once the operation completes, the Strobe bit of the command
922 * register will be reset. We poll for this particular condition
923 * We wait for a maximum of 500ms for the operation to complete,
924 * if it's not complete by then we return error.
925 */
926 time = 0;
927 while (TRUE) {
928 val64 = readq(&bar0->rti_command_mem);
929 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
930 break;
931 }
932 if (time > 10) {
933 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
934 dev->name);
935 return -1;
936 }
937 time++;
938 msleep(50);
939 }
940
941 /*
942 * Initializing proper values as Pause threshold into all
943 * the 8 Queues on Rx side.
944 */
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
946 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
947
948 /* Disable RMAC PAD STRIPPING */
949 add = &bar0->mac_cfg;
950 val64 = readq(&bar0->mac_cfg);
951 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
952 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
953 writel((u32) (val64), add);
954 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
955 writel((u32) (val64 >> 32), (add + 4));
956 val64 = readq(&bar0->mac_cfg);
957
958 /*
959 * Set the time value to be inserted in the pause frame
960 * generated by xena.
961 */
962 val64 = readq(&bar0->rmac_pause_cfg);
963 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
964 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
965 writeq(val64, &bar0->rmac_pause_cfg);
966
967 /*
968 * Set the threshold limit for generating the pause frame.
969 * If the amount of data in any Queue exceeds the ratio
970 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
971 * a pause frame is generated.
972 */
973 val64 = 0;
974 for (i = 0; i < 4; i++) {
975 val64 |=
976 (((u64) 0xFF00 | nic->mac_control.
977 mc_pause_threshold_q0q3)
978 << (i * 2 * 8));
979 }
980 writeq(val64, &bar0->mc_pause_thresh_q0q3);
981
982 val64 = 0;
983 for (i = 0; i < 4; i++) {
984 val64 |=
985 (((u64) 0xFF00 | nic->mac_control.
986 mc_pause_threshold_q4q7)
987 << (i * 2 * 8));
988 }
989 writeq(val64, &bar0->mc_pause_thresh_q4q7);
990
991 /*
992 * TxDMA will stop issuing read requests if the number of read
993 * splits exceeds the limit set by shared_splits.
994 */
995 val64 = readq(&bar0->pic_control);
996 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
997 writeq(val64, &bar0->pic_control);
998
999 return SUCCESS;
1000}
1001
1002/**
1003 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1004 * @nic: device private variable,
1005 * @mask: A mask indicating which Intr block must be modified, and
1006 * @flag: A flag indicating whether to enable or disable the Intrs.
1007 * Description: This function will either disable or enable the interrupts
1008 * depending on the flag argument. The mask argument can be used to
1009 * enable/disable any Intr block.
1010 * Return Value: NONE.
1011 */
1012
1013static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1014{
1015 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1016 register u64 val64 = 0, temp64 = 0;
1017
1018 /* Top level interrupt classification */
1019 /* PIC Interrupts */
1020 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1021 /* Enable PIC Intrs in the general intr mask register */
1022 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1023 if (flag == ENABLE_INTRS) {
1024 temp64 = readq(&bar0->general_int_mask);
1025 temp64 &= ~((u64) val64);
1026 writeq(temp64, &bar0->general_int_mask);
1027 /*
1028 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1029 * interrupts for now.
1030 * TODO
1031 */
1032 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1033 /*
1034 * No MSI Support is available presently, so TTI and
1035 * RTI interrupts are also disabled.
1036 */
1037 } else if (flag == DISABLE_INTRS) {
1038 /*
1039 * Disable PIC Intrs in the general
1040 * intr mask register
1041 */
1042 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1043 temp64 = readq(&bar0->general_int_mask);
1044 val64 |= temp64;
1045 writeq(val64, &bar0->general_int_mask);
1046 }
1047 }
1048
1049 /* DMA Interrupts */
1050 /* Enabling/Disabling Tx DMA interrupts */
1051 if (mask & TX_DMA_INTR) {
1052 /* Enable TxDMA Intrs in the general intr mask register */
1053 val64 = TXDMA_INT_M;
1054 if (flag == ENABLE_INTRS) {
1055 temp64 = readq(&bar0->general_int_mask);
1056 temp64 &= ~((u64) val64);
1057 writeq(temp64, &bar0->general_int_mask);
1058 /*
1059 * Keep all interrupts other than PFC interrupt
1060 * and PCC interrupt disabled in DMA level.
1061 */
1062 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1063 TXDMA_PCC_INT_M);
1064 writeq(val64, &bar0->txdma_int_mask);
1065 /*
1066 * Enable only the MISC error 1 interrupt in PFC block
1067 */
1068 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1069 writeq(val64, &bar0->pfc_err_mask);
1070 /*
1071 * Enable only the FB_ECC error interrupt in PCC block
1072 */
1073 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1074 writeq(val64, &bar0->pcc_err_mask);
1075 } else if (flag == DISABLE_INTRS) {
1076 /*
1077 * Disable TxDMA Intrs in the general intr mask
1078 * register
1079 */
1080 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1081 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1082 temp64 = readq(&bar0->general_int_mask);
1083 val64 |= temp64;
1084 writeq(val64, &bar0->general_int_mask);
1085 }
1086 }
1087
1088 /* Enabling/Disabling Rx DMA interrupts */
1089 if (mask & RX_DMA_INTR) {
1090 /* Enable RxDMA Intrs in the general intr mask register */
1091 val64 = RXDMA_INT_M;
1092 if (flag == ENABLE_INTRS) {
1093 temp64 = readq(&bar0->general_int_mask);
1094 temp64 &= ~((u64) val64);
1095 writeq(temp64, &bar0->general_int_mask);
1096 /*
1097 * All RxDMA block interrupts are disabled for now
1098 * TODO
1099 */
1100 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1101 } else if (flag == DISABLE_INTRS) {
1102 /*
1103 * Disable RxDMA Intrs in the general intr mask
1104 * register
1105 */
1106 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1107 temp64 = readq(&bar0->general_int_mask);
1108 val64 |= temp64;
1109 writeq(val64, &bar0->general_int_mask);
1110 }
1111 }
1112
1113 /* MAC Interrupts */
1114 /* Enabling/Disabling MAC interrupts */
1115 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1116 val64 = TXMAC_INT_M | RXMAC_INT_M;
1117 if (flag == ENABLE_INTRS) {
1118 temp64 = readq(&bar0->general_int_mask);
1119 temp64 &= ~((u64) val64);
1120 writeq(temp64, &bar0->general_int_mask);
1121 /*
1122 * All MAC block error interrupts are disabled for now
1123 * except the link status change interrupt.
1124 * TODO
1125 */
1126 val64 = MAC_INT_STATUS_RMAC_INT;
1127 temp64 = readq(&bar0->mac_int_mask);
1128 temp64 &= ~((u64) val64);
1129 writeq(temp64, &bar0->mac_int_mask);
1130
1131 val64 = readq(&bar0->mac_rmac_err_mask);
1132 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1133 writeq(val64, &bar0->mac_rmac_err_mask);
1134 } else if (flag == DISABLE_INTRS) {
1135 /*
1136 * Disable MAC Intrs in the general intr mask register
1137 */
1138 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1139 writeq(DISABLE_ALL_INTRS,
1140 &bar0->mac_rmac_err_mask);
1141
1142 temp64 = readq(&bar0->general_int_mask);
1143 val64 |= temp64;
1144 writeq(val64, &bar0->general_int_mask);
1145 }
1146 }
1147
1148 /* XGXS Interrupts */
1149 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1150 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1151 if (flag == ENABLE_INTRS) {
1152 temp64 = readq(&bar0->general_int_mask);
1153 temp64 &= ~((u64) val64);
1154 writeq(temp64, &bar0->general_int_mask);
1155 /*
1156 * All XGXS block error interrupts are disabled for now
1157 * TODO
1158 */
1159 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1160 } else if (flag == DISABLE_INTRS) {
1161 /*
1162 * Disable XGXS Intrs in the general intr mask register
1163 */
1164 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1165 temp64 = readq(&bar0->general_int_mask);
1166 val64 |= temp64;
1167 writeq(val64, &bar0->general_int_mask);
1168 }
1169 }
1170
1171 /* Memory Controller(MC) interrupts */
1172 if (mask & MC_INTR) {
1173 val64 = MC_INT_M;
1174 if (flag == ENABLE_INTRS) {
1175 temp64 = readq(&bar0->general_int_mask);
1176 temp64 &= ~((u64) val64);
1177 writeq(temp64, &bar0->general_int_mask);
1178 /*
1179 * All MC block error interrupts are disabled for now
1180 * TODO
1181 */
1182 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1183 } else if (flag == DISABLE_INTRS) {
1184 /*
1185 * Disable MC Intrs in the general intr mask register
1186 */
1187 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1188 temp64 = readq(&bar0->general_int_mask);
1189 val64 |= temp64;
1190 writeq(val64, &bar0->general_int_mask);
1191 }
1192 }
1193
1194
1195 /* Tx traffic interrupts */
1196 if (mask & TX_TRAFFIC_INTR) {
1197 val64 = TXTRAFFIC_INT_M;
1198 if (flag == ENABLE_INTRS) {
1199 temp64 = readq(&bar0->general_int_mask);
1200 temp64 &= ~((u64) val64);
1201 writeq(temp64, &bar0->general_int_mask);
1202 /*
1203 * Enable all the Tx side interrupts
1204 * writing 0 Enables all 64 TX interrupt levels
1205 */
1206 writeq(0x0, &bar0->tx_traffic_mask);
1207 } else if (flag == DISABLE_INTRS) {
1208 /*
1209 * Disable Tx Traffic Intrs in the general intr mask
1210 * register.
1211 */
1212 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1213 temp64 = readq(&bar0->general_int_mask);
1214 val64 |= temp64;
1215 writeq(val64, &bar0->general_int_mask);
1216 }
1217 }
1218
1219 /* Rx traffic interrupts */
1220 if (mask & RX_TRAFFIC_INTR) {
1221 val64 = RXTRAFFIC_INT_M;
1222 if (flag == ENABLE_INTRS) {
1223 temp64 = readq(&bar0->general_int_mask);
1224 temp64 &= ~((u64) val64);
1225 writeq(temp64, &bar0->general_int_mask);
1226 /* writing 0 Enables all 8 RX interrupt levels */
1227 writeq(0x0, &bar0->rx_traffic_mask);
1228 } else if (flag == DISABLE_INTRS) {
1229 /*
1230 * Disable Rx Traffic Intrs in the general intr mask
1231 * register.
1232 */
1233 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1234 temp64 = readq(&bar0->general_int_mask);
1235 val64 |= temp64;
1236 writeq(val64, &bar0->general_int_mask);
1237 }
1238 }
1239}
1240
1241/**
1242 * verify_xena_quiescence - Checks whether the H/W is ready
1243 * @val64 : Value read from adapter status register.
1244 * @flag : indicates if the adapter enable bit was ever written once
1245 * before.
1246 * Description: Returns whether the H/W is ready to go or not. Depending
1247 * on whether adapter enable bit was written or not the comparison
1248 * differs and the calling function passes the input argument flag to
1249 * indicate this.
1250 * Return: 1 if Xena is quiescent
1251 * 0 if Xena is not quiescent
1252 */
1253
1254static int verify_xena_quiescence(u64 val64, int flag)
1255{
1256 int ret = 0;
1257 u64 tmp64 = ~((u64) val64);
1258
1259 if (!
1260 (tmp64 &
1261 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1262 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1263 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1264 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1265 ADAPTER_STATUS_P_PLL_LOCK))) {
1266 if (flag == FALSE) {
1267 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1268 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1269 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1270
1271 ret = 1;
1272
1273 }
1274 } else {
1275 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1276 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1277 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1278 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1279 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1280
1281 ret = 1;
1282
1283 }
1284 }
1285 }
1286
1287 return ret;
1288}
1289
1290/**
1291 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1292 * @sp: Pointer to device specific structure
1293 * Description :
1294 * New procedure to clear mac address reading problems on Alpha platforms
1295 *
1296 */
1297
1298static void fix_mac_address(nic_t * sp)
1299{
1300 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1301 u64 val64;
1302 int i = 0;
1303
1304 while (fix_mac[i] != END_SIGN) {
1305 writeq(fix_mac[i++], &bar0->gpio_control);
1306 val64 = readq(&bar0->gpio_control);
1307 }
1308}
1309
1310/**
1311 * start_nic - Turns the device on
1312 * @nic : device private variable.
1313 * Description:
1314 * This function actually turns the device on. Before this function is
1315 * called, all registers are configured from their reset states
1316 * and shared memory is allocated but the NIC is still quiescent. On
1317 * calling this function, the device interrupts are cleared and the NIC is
1318 * literally switched on by writing into the adapter control register.
1319 * Return Value:
1320 * SUCCESS on success and -1 on failure.
1321 */
1322
1323static int start_nic(struct s2io_nic *nic)
1324{
1325 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1326 struct net_device *dev = nic->dev;
1327 register u64 val64 = 0;
1328 u16 interruptible, i;
1329 u16 subid;
1330 mac_info_t *mac_control;
1331 struct config_param *config;
1332
1333 mac_control = &nic->mac_control;
1334 config = &nic->config;
1335
1336 /* PRC Initialization and configuration */
1337 for (i = 0; i < config->rx_ring_num; i++) {
1338 writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
1339 &bar0->prc_rxd0_n[i]);
1340
1341 val64 = readq(&bar0->prc_ctrl_n[i]);
1342#ifndef CONFIG_2BUFF_MODE
1343 val64 |= PRC_CTRL_RC_ENABLED;
1344#else
1345 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1346#endif
1347 writeq(val64, &bar0->prc_ctrl_n[i]);
1348 }
1349
1350#ifdef CONFIG_2BUFF_MODE
1351 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1352 val64 = readq(&bar0->rx_pa_cfg);
1353 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1354 writeq(val64, &bar0->rx_pa_cfg);
1355#endif
1356
1357 /*
1358 * Enabling MC-RLDRAM. After enabling the device, we timeout
1359 * for around 100ms, which is approximately the time required
1360 * for the device to be ready for operation.
1361 */
1362 val64 = readq(&bar0->mc_rldram_mrs);
1363 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1364 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1365 val64 = readq(&bar0->mc_rldram_mrs);
1366
1367 msleep(100); /* Delay by around 100 ms. */
1368
1369 /* Enabling ECC Protection. */
1370 val64 = readq(&bar0->adapter_control);
1371 val64 &= ~ADAPTER_ECC_EN;
1372 writeq(val64, &bar0->adapter_control);
1373
1374 /*
1375 * Clearing any possible Link state change interrupts that
1376 * could have popped up just before Enabling the card.
1377 */
1378 val64 = readq(&bar0->mac_rmac_err_reg);
1379 if (val64)
1380 writeq(val64, &bar0->mac_rmac_err_reg);
1381
1382 /*
1383 * Verify if the device is ready to be enabled, if so enable
1384 * it.
1385 */
1386 val64 = readq(&bar0->adapter_status);
1387 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
1388 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1389 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1390 (unsigned long long) val64);
1391 return FAILURE;
1392 }
1393
1394 /* Enable select interrupts */
1395 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1396 RX_MAC_INTR;
1397 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1398
1399 /*
1400 * With some switches, link might be already up at this point.
1401 * Because of this weird behavior, when we enable laser,
1402 * we may not get link. We need to handle this. We cannot
1403 * figure out which switch is misbehaving. So we are forced to
1404 * make a global change.
1405 */
1406
1407 /* Enabling Laser. */
1408 val64 = readq(&bar0->adapter_control);
1409 val64 |= ADAPTER_EOI_TX_ON;
1410 writeq(val64, &bar0->adapter_control);
1411
1412 /* SXE-002: Initialize link and activity LED */
1413 subid = nic->pdev->subsystem_device;
1414 if ((subid & 0xFF) >= 0x07) {
1415 val64 = readq(&bar0->gpio_control);
1416 val64 |= 0x0000800000000000ULL;
1417 writeq(val64, &bar0->gpio_control);
1418 val64 = 0x0411040400000000ULL;
1419 writeq(val64, (void __iomem *) bar0 + 0x2700);
1420 }
1421
1422 /*
1423 * Don't see link state interrupts on certain switches, so
1424 * directly scheduling a link state task from here.
1425 */
1426 schedule_work(&nic->set_link_task);
1427
1428 /*
1429 * Here we are performing soft reset on XGXS to
1430 * force link down. Since link is already up, we will get
1431 * link state change interrupt after this reset
1432 */
1433 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1434 val64 = readq(&bar0->dtx_control);
1435 udelay(50);
1436 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1437 val64 = readq(&bar0->dtx_control);
1438 udelay(50);
1439 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1440 val64 = readq(&bar0->dtx_control);
1441 udelay(50);
1442
1443 return SUCCESS;
1444}
1445
1446/**
1447 * free_tx_buffers - Free all queued Tx buffers
1448 * @nic : device private variable.
1449 * Description:
1450 * Free all queued Tx buffers.
1451 * Return Value: void
1452*/
1453
1454static void free_tx_buffers(struct s2io_nic *nic)
1455{
1456 struct net_device *dev = nic->dev;
1457 struct sk_buff *skb;
1458 TxD_t *txdp;
1459 int i, j;
1460 mac_info_t *mac_control;
1461 struct config_param *config;
1462 int cnt = 0;
1463
1464 mac_control = &nic->mac_control;
1465 config = &nic->config;
1466
1467 for (i = 0; i < config->tx_fifo_num; i++) {
1468 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1469 txdp = (TxD_t *) nic->list_info[i][j].
1470 list_virt_addr;
1471 skb =
1472 (struct sk_buff *) ((unsigned long) txdp->
1473 Host_Control);
1474 if (skb == NULL) {
1475 memset(txdp, 0, sizeof(TxD_t));
1476 continue;
1477 }
1478 dev_kfree_skb(skb);
1479 memset(txdp, 0, sizeof(TxD_t));
1480 cnt++;
1481 }
1482 DBG_PRINT(INTR_DBG,
1483 "%s:forcibly freeing %d skbs on FIFO%d\n",
1484 dev->name, cnt, i);
1485 mac_control->tx_curr_get_info[i].offset = 0;
1486 mac_control->tx_curr_put_info[i].offset = 0;
1487 }
1488}
1489
1490/**
1491 * stop_nic - To stop the nic
1492 * @nic : device private variable.
1493 * Description:
1494 * This function does exactly the opposite of what the start_nic()
1495 * function does. This function is called to stop the device.
1496 * Return Value:
1497 * void.
1498 */
1499
1500static void stop_nic(struct s2io_nic *nic)
1501{
1502 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1503 register u64 val64 = 0;
1504 u16 interruptible, i;
1505 mac_info_t *mac_control;
1506 struct config_param *config;
1507
1508 mac_control = &nic->mac_control;
1509 config = &nic->config;
1510
1511 /* Disable all interrupts */
1512 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1513 RX_MAC_INTR;
1514 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1515
1516 /* Disable PRCs */
1517 for (i = 0; i < config->rx_ring_num; i++) {
1518 val64 = readq(&bar0->prc_ctrl_n[i]);
1519 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1520 writeq(val64, &bar0->prc_ctrl_n[i]);
1521 }
1522}
1523
1524/**
1525 * fill_rx_buffers - Allocates the Rx side skbs
1526 * @nic: device private variable
1527 * @ring_no: ring number
1528 * Description:
1529 * The function allocates Rx side skbs and puts the physical
1530 * address of these buffers into the RxD buffer pointers, so that the NIC
1531 * can DMA the received frame into these locations.
1532 * The NIC supports 3 receive modes, viz
1533 * 1. single buffer,
1534 * 2. three buffer and
1535 * 3. Five buffer modes.
1536 * Each mode defines how many fragments the received frame will be split
1537 * up into by the NIC. The frame is split into L3 header, L4 Header,
1538 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1539 * is split into 3 fragments. Single buffer mode is the default;
1540 * the three buffer layout is used when CONFIG_2BUFF_MODE is enabled.
1541 * Return Value:
1542 * SUCCESS on success or an appropriate -ve value on failure.
1543 */
1544
1545static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1546{
1547 struct net_device *dev = nic->dev;
1548 struct sk_buff *skb;
1549 RxD_t *rxdp;
1550 int off, off1, size, block_no, block_no1;
1551 int offset, offset1;
1552 u32 alloc_tab = 0;
1553 u32 alloc_cnt = nic->pkt_cnt[ring_no] -
1554 atomic_read(&nic->rx_bufs_left[ring_no]);
1555 mac_info_t *mac_control;
1556 struct config_param *config;
1557#ifdef CONFIG_2BUFF_MODE
1558 RxD_t *rxdpnext;
1559 int nextblk;
1560 unsigned long tmp;
1561 buffAdd_t *ba;
1562 dma_addr_t rxdpphys;
1563#endif
1564#ifndef CONFIG_S2IO_NAPI
1565 unsigned long flags;
1566#endif
1567
1568 mac_control = &nic->mac_control;
1569 config = &nic->config;
1570
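 /*
 * Each Rx buffer must be able to hold the largest frame the MAC will
 * accept: an MTU worth of payload plus the Ethernet/802.2/SNAP header
 * bytes accounted for by the HEADER_* constants below.
 */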
1571 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1572 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1573
1574 while (alloc_tab < alloc_cnt) {
1575 block_no = mac_control->rx_curr_put_info[ring_no].
1576 block_index;
1577 block_no1 = mac_control->rx_curr_get_info[ring_no].
1578 block_index;
1579 off = mac_control->rx_curr_put_info[ring_no].offset;
1580 off1 = mac_control->rx_curr_get_info[ring_no].offset;
1581#ifndef CONFIG_2BUFF_MODE
1582 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1583 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1584#else
1585 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1586 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1587#endif
1588
1589 rxdp = nic->rx_blocks[ring_no][block_no].
1590 block_virt_addr + off;
1591 if ((offset == offset1) && (rxdp->Host_Control)) {
1592 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1593 DBG_PRINT(INTR_DBG, " info equated\n");
1594 goto end;
1595 }
1596#ifndef CONFIG_2BUFF_MODE
1597 if (rxdp->Control_1 == END_OF_BLOCK) {
1598 mac_control->rx_curr_put_info[ring_no].
1599 block_index++;
1600 mac_control->rx_curr_put_info[ring_no].
1601 block_index %= nic->block_count[ring_no];
1602 block_no = mac_control->rx_curr_put_info
1603 [ring_no].block_index;
1604 off++;
1605 off %= (MAX_RXDS_PER_BLOCK + 1);
1606 mac_control->rx_curr_put_info[ring_no].offset =
1607 off;
1608 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1609 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1610 dev->name, rxdp);
1611 }
1612#ifndef CONFIG_S2IO_NAPI
1613 spin_lock_irqsave(&nic->put_lock, flags);
1614 nic->put_pos[ring_no] =
1615 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1616 spin_unlock_irqrestore(&nic->put_lock, flags);
1617#endif
1618#else
1619 if (rxdp->Host_Control == END_OF_BLOCK) {
1620 mac_control->rx_curr_put_info[ring_no].
1621 block_index++;
1622 mac_control->rx_curr_put_info[ring_no].
1623 block_index %= nic->block_count[ring_no];
1624 block_no = mac_control->rx_curr_put_info
1625 [ring_no].block_index;
1626 off = 0;
1627 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1628 dev->name, block_no,
1629 (unsigned long long) rxdp->Control_1);
1630 mac_control->rx_curr_put_info[ring_no].offset =
1631 off;
1632 rxdp = nic->rx_blocks[ring_no][block_no].
1633 block_virt_addr;
1634 }
1635#ifndef CONFIG_S2IO_NAPI
1636 spin_lock_irqsave(&nic->put_lock, flags);
1637 nic->put_pos[ring_no] = (block_no *
1638 (MAX_RXDS_PER_BLOCK + 1)) + off;
1639 spin_unlock_irqrestore(&nic->put_lock, flags);
1640#endif
1641#endif
1642
1643#ifndef CONFIG_2BUFF_MODE
1644 if (rxdp->Control_1 & RXD_OWN_XENA)
1645#else
1646 if (rxdp->Control_2 & BIT(0))
1647#endif
1648 {
1649 mac_control->rx_curr_put_info[ring_no].
1650 offset = off;
1651 goto end;
1652 }
1653#ifdef CONFIG_2BUFF_MODE
1654 /*
1655 * RxDs Spanning cache lines will be replenished only
1656 * if the succeeding RxD is also owned by Host. It
1657 * will always be the ((8*i)+3) and ((8*i)+6)
1658 * descriptors for the 48 byte descriptor. The offending
1659 * descriptor is of course the 3rd descriptor.
1660 */
1661 rxdpphys = nic->rx_blocks[ring_no][block_no].
1662 block_dma_addr + (off * sizeof(RxD_t));
1663 if (((u64) (rxdpphys)) % 128 > 80) {
1664 rxdpnext = nic->rx_blocks[ring_no][block_no].
1665 block_virt_addr + (off + 1);
1666 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1667 nextblk = (block_no + 1) %
1668 (nic->block_count[ring_no]);
1669 rxdpnext = nic->rx_blocks[ring_no]
1670 [nextblk].block_virt_addr;
1671 }
1672 if (rxdpnext->Control_2 & BIT(0))
1673 goto end;
1674 }
1675#endif
1676
1677#ifndef CONFIG_2BUFF_MODE
1678 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1679#else
1680 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1681#endif
1682 if (!skb) {
1683 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1684 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1685 return -ENOMEM;
1686 }
1687#ifndef CONFIG_2BUFF_MODE
1688 skb_reserve(skb, NET_IP_ALIGN);
1689 memset(rxdp, 0, sizeof(RxD_t));
1690 rxdp->Buffer0_ptr = pci_map_single
1691 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1692 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1693 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1694 rxdp->Host_Control = (unsigned long) (skb);
1695 rxdp->Control_1 |= RXD_OWN_XENA;
1696 off++;
1697 off %= (MAX_RXDS_PER_BLOCK + 1);
1698 mac_control->rx_curr_put_info[ring_no].offset = off;
1699#else
1700 ba = &nic->ba[ring_no][block_no][off];
1701 skb_reserve(skb, BUF0_LEN);
1702 tmp = (unsigned long) skb->data;
1703 tmp += ALIGN_SIZE;
1704 tmp &= ~ALIGN_SIZE;
1705 skb->data = (void *) tmp;
1706 skb->tail = (void *) tmp;
1707
1708 memset(rxdp, 0, sizeof(RxD_t));
1709 rxdp->Buffer2_ptr = pci_map_single
1710 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1711 PCI_DMA_FROMDEVICE);
1712 rxdp->Buffer0_ptr =
1713 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1714 PCI_DMA_FROMDEVICE);
1715 rxdp->Buffer1_ptr =
1716 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1717 PCI_DMA_FROMDEVICE);
1718
1719 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1720 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1721 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1722 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1723 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1724 rxdp->Control_1 |= RXD_OWN_XENA;
1725 off++;
1726 mac_control->rx_curr_put_info[ring_no].offset = off;
1727#endif
1728 atomic_inc(&nic->rx_bufs_left[ring_no]);
1729 alloc_tab++;
1730 }
1731
1732 end:
1733 return SUCCESS;
1734}
1735
1736/**
1737 * free_rx_buffers - Frees all Rx buffers
1738 * @sp: device private variable.
1739 * Description:
1740 * This function will free all Rx buffers allocated by host.
1741 * Return Value:
1742 * NONE.
1743 */
1744
1745static void free_rx_buffers(struct s2io_nic *sp)
1746{
1747 struct net_device *dev = sp->dev;
1748 int i, j, blk = 0, off, buf_cnt = 0;
1749 RxD_t *rxdp;
1750 struct sk_buff *skb;
1751 mac_info_t *mac_control;
1752 struct config_param *config;
1753#ifdef CONFIG_2BUFF_MODE
1754 buffAdd_t *ba;
1755#endif
1756
1757 mac_control = &sp->mac_control;
1758 config = &sp->config;
1759
1760 for (i = 0; i < config->rx_ring_num; i++) {
1761 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1762 off = j % (MAX_RXDS_PER_BLOCK + 1);
1763 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
1764
1765#ifndef CONFIG_2BUFF_MODE
1766 if (rxdp->Control_1 == END_OF_BLOCK) {
1767 rxdp =
1768 (RxD_t *) ((unsigned long) rxdp->
1769 Control_2);
1770 j++;
1771 blk++;
1772 }
1773#else
1774 if (rxdp->Host_Control == END_OF_BLOCK) {
1775 blk++;
1776 continue;
1777 }
1778#endif
1779
1780 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1781 memset(rxdp, 0, sizeof(RxD_t));
1782 continue;
1783 }
1784
1785 skb =
1786 (struct sk_buff *) ((unsigned long) rxdp->
1787 Host_Control);
1788 if (skb) {
1789#ifndef CONFIG_2BUFF_MODE
1790 pci_unmap_single(sp->pdev, (dma_addr_t)
1791 rxdp->Buffer0_ptr,
1792 dev->mtu +
1793 HEADER_ETHERNET_II_802_3_SIZE
1794 + HEADER_802_2_SIZE +
1795 HEADER_SNAP_SIZE,
1796 PCI_DMA_FROMDEVICE);
1797#else
1798 ba = &sp->ba[i][blk][off];
1799 pci_unmap_single(sp->pdev, (dma_addr_t)
1800 rxdp->Buffer0_ptr,
1801 BUF0_LEN,
1802 PCI_DMA_FROMDEVICE);
1803 pci_unmap_single(sp->pdev, (dma_addr_t)
1804 rxdp->Buffer1_ptr,
1805 BUF1_LEN,
1806 PCI_DMA_FROMDEVICE);
1807 pci_unmap_single(sp->pdev, (dma_addr_t)
1808 rxdp->Buffer2_ptr,
1809 dev->mtu + BUF0_LEN + 4,
1810 PCI_DMA_FROMDEVICE);
1811#endif
1812 dev_kfree_skb(skb);
1813 atomic_dec(&sp->rx_bufs_left[i]);
1814 buf_cnt++;
1815 }
1816 memset(rxdp, 0, sizeof(RxD_t));
1817 }
1818 mac_control->rx_curr_put_info[i].block_index = 0;
1819 mac_control->rx_curr_get_info[i].block_index = 0;
1820 mac_control->rx_curr_put_info[i].offset = 0;
1821 mac_control->rx_curr_get_info[i].offset = 0;
1822 atomic_set(&sp->rx_bufs_left[i], 0);
1823 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1824 dev->name, buf_cnt, i);
1825 }
1826}
1827
1828/**
1829 * s2io_poll - Rx interrupt handler for NAPI support
1830 * @dev : pointer to the device structure.
1831 * @budget : The number of packets that were budgeted to be processed
1832 * during one pass through the 'Poll' function.
1833 * Description:
1834 * Comes into picture only if NAPI support has been incorporated. It does
1835 * the same thing that rx_intr_handler does, but not in an interrupt
1836 * context; also it will process only a given number of packets.
1837 * Return value:
1838 * 0 on success and 1 if there are No Rx packets to be processed.
1839 */
1840
1841#ifdef CONFIG_S2IO_NAPI
1842static int s2io_poll(struct net_device *dev, int *budget)
1843{
1844 nic_t *nic = dev->priv;
1845 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1846 int pkts_to_process = *budget, pkt_cnt = 0;
1847 register u64 val64 = 0;
1848 rx_curr_get_info_t get_info, put_info;
1849 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1850#ifndef CONFIG_2BUFF_MODE
1851 u16 val16, cksum;
1852#endif
1853 struct sk_buff *skb;
1854 RxD_t *rxdp;
1855 mac_info_t *mac_control;
1856 struct config_param *config;
1857#ifdef CONFIG_2BUFF_MODE
1858 buffAdd_t *ba;
1859#endif
1860
1861 mac_control = &nic->mac_control;
1862 config = &nic->config;
1863
1864 if (pkts_to_process > dev->quota)
1865 pkts_to_process = dev->quota;
1866
1867 val64 = readq(&bar0->rx_traffic_int);
1868 writeq(val64, &bar0->rx_traffic_int);
1869
1870 for (i = 0; i < config->rx_ring_num; i++) {
1871 get_info = mac_control->rx_curr_get_info[i];
1872 get_block = get_info.block_index;
1873 put_info = mac_control->rx_curr_put_info[i];
1874 put_block = put_info.block_index;
1875 ring_bufs = config->rx_cfg[i].num_rxd;
1876 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1877 get_info.offset;
1878#ifndef CONFIG_2BUFF_MODE
1879 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1880 get_info.offset;
1881 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1882 put_info.offset;
1883 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1884 (((get_offset + 1) % ring_bufs) != put_offset)) {
1885 if (--pkts_to_process < 0) {
1886 goto no_rx;
1887 }
1888 if (rxdp->Control_1 == END_OF_BLOCK) {
1889 rxdp =
1890 (RxD_t *) ((unsigned long) rxdp->
1891 Control_2);
1892 get_info.offset++;
1893 get_info.offset %=
1894 (MAX_RXDS_PER_BLOCK + 1);
1895 get_block++;
1896 get_block %= nic->block_count[i];
1897 mac_control->rx_curr_get_info[i].
1898 offset = get_info.offset;
1899 mac_control->rx_curr_get_info[i].
1900 block_index = get_block;
1901 continue;
1902 }
1903 get_offset =
1904 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1905 get_info.offset;
1906 skb =
1907 (struct sk_buff *) ((unsigned long) rxdp->
1908 Host_Control);
1909 if (skb == NULL) {
1910 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1911 dev->name);
1912 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1913 goto no_rx;
1914 }
1915 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1916 val16 = (u16) (val64 >> 48);
1917 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1918 pci_unmap_single(nic->pdev, (dma_addr_t)
1919 rxdp->Buffer0_ptr,
1920 dev->mtu +
1921 HEADER_ETHERNET_II_802_3_SIZE +
1922 HEADER_802_2_SIZE +
1923 HEADER_SNAP_SIZE,
1924 PCI_DMA_FROMDEVICE);
1925 rx_osm_handler(nic, val16, rxdp, i);
1926 pkt_cnt++;
1927 get_info.offset++;
1928 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1929 rxdp =
1930 nic->rx_blocks[i][get_block].block_virt_addr +
1931 get_info.offset;
1932 mac_control->rx_curr_get_info[i].offset =
1933 get_info.offset;
1934 }
1935#else
1936 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1937 get_info.offset;
1938 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1939 put_info.offset;
1940 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1941 !(rxdp->Control_2 & BIT(0))) &&
1942 (((get_offset + 1) % ring_bufs) != put_offset)) {
1943 if (--pkts_to_process < 0) {
1944 goto no_rx;
1945 }
1946 skb = (struct sk_buff *) ((unsigned long)
1947 rxdp->Host_Control);
1948 if (skb == NULL) {
1949 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1950 dev->name);
1951 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1952 goto no_rx;
1953 }
1954
1955 pci_unmap_single(nic->pdev, (dma_addr_t)
1956 rxdp->Buffer0_ptr,
1957 BUF0_LEN, PCI_DMA_FROMDEVICE);
1958 pci_unmap_single(nic->pdev, (dma_addr_t)
1959 rxdp->Buffer1_ptr,
1960 BUF1_LEN, PCI_DMA_FROMDEVICE);
1961 pci_unmap_single(nic->pdev, (dma_addr_t)
1962 rxdp->Buffer2_ptr,
1963 dev->mtu + BUF0_LEN + 4,
1964 PCI_DMA_FROMDEVICE);
1965 ba = &nic->ba[i][get_block][get_info.offset];
1966
1967 rx_osm_handler(nic, rxdp, i, ba);
1968
1969 get_info.offset++;
1970 mac_control->rx_curr_get_info[i].offset =
1971 get_info.offset;
1972 rxdp =
1973 nic->rx_blocks[i][get_block].block_virt_addr +
1974 get_info.offset;
1975
1976 if (get_info.offset &&
1977 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1978 get_info.offset = 0;
1979 mac_control->rx_curr_get_info[i].
1980 offset = get_info.offset;
1981 get_block++;
1982 get_block %= nic->block_count[i];
1983 mac_control->rx_curr_get_info[i].
1984 block_index = get_block;
1985 rxdp =
1986 nic->rx_blocks[i][get_block].
1987 block_virt_addr;
1988 }
1989 get_offset =
1990 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1991 get_info.offset;
1992 pkt_cnt++;
1993 }
1994#endif
1995 }
1996 if (!pkt_cnt)
1997 pkt_cnt = 1;
1998
1999 dev->quota -= pkt_cnt;
2000 *budget -= pkt_cnt;
2001 netif_rx_complete(dev);
2002
2003 for (i = 0; i < config->rx_ring_num; i++) {
2004 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2005 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2006 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2007 break;
2008 }
2009 }
2010 /* Re enable the Rx interrupts. */
2011 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2012 return 0;
2013
2014 no_rx:
2015 dev->quota -= pkt_cnt;
2016 *budget -= pkt_cnt;
2017
2018 for (i = 0; i < config->rx_ring_num; i++) {
2019 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2020 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2021 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2022 break;
2023 }
2024 }
2025 return 1;
2026}
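/*
 * Illustrative note (not part of the original driver): s2io_poll follows the
 * old netdev NAPI contract, in which the driver decrements both dev->quota
 * and *budget by the number of packets it handled, and returns 0 only after
 * calling netif_rx_complete() and re-enabling Rx interrupts. A minimal
 * sketch of that contract, assuming a hypothetical helper process_one_rxd()
 * that consumes a single descriptor and returns 0 when the ring is empty:
 *
 *	int xxx_poll(struct net_device *dev, int *budget)
 *	{
 *		int limit = min(*budget, dev->quota), done = 0;
 *
 *		while (done < limit && process_one_rxd(dev))
 *			done++;
 *		dev->quota -= done;
 *		*budget -= done;
 *		if (done == limit)
 *			return 1;	// more work pending, stay scheduled
 *		netif_rx_complete(dev);
 *		// re-enable Rx interrupts here
 *		return 0;
 *	}
 */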
2027#else
2028/**
2029 * rx_intr_handler - Rx interrupt handler
2030 * @nic: device private variable.
2031 * Description:
2032 * If the interrupt is because of a received frame or if the
2033 * receive ring contains fresh, as yet un-processed frames, this function is
2034 * called. It picks up the RxD at which the last Rx processing had
2035 * stopped, sends the skb to the OSM's Rx handler and then increments
2036 * the offset.
2037 * Return Value:
2038 * NONE.
2039 */
2040
2041static void rx_intr_handler(struct s2io_nic *nic)
2042{
2043 struct net_device *dev = (struct net_device *) nic->dev;
2044 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2045 rx_curr_get_info_t get_info, put_info;
2046 RxD_t *rxdp;
2047 struct sk_buff *skb;
2048#ifndef CONFIG_2BUFF_MODE
2049 u16 val16, cksum;
2050#endif
2051 register u64 val64 = 0;
2052 int get_block, get_offset, put_block, put_offset, ring_bufs;
2053 int i, pkt_cnt = 0;
2054 mac_info_t *mac_control;
2055 struct config_param *config;
2056#ifdef CONFIG_2BUFF_MODE
2057 buffAdd_t *ba;
2058#endif
2059
2060 mac_control = &nic->mac_control;
2061 config = &nic->config;
2062
2063 /*
2064 * rx_traffic_int reg is an R1 register, hence we read and write back
2065 * the same value in the register to clear it.
2066 */
2067 val64 = readq(&bar0->rx_traffic_int);
2068 writeq(val64, &bar0->rx_traffic_int);
2069
2070 for (i = 0; i < config->rx_ring_num; i++) {
2071 get_info = mac_control->rx_curr_get_info[i];
2072 get_block = get_info.block_index;
2073 put_info = mac_control->rx_curr_put_info[i];
2074 put_block = put_info.block_index;
2075 ring_bufs = config->rx_cfg[i].num_rxd;
2076 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2077 get_info.offset;
2078#ifndef CONFIG_2BUFF_MODE
2079 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2080 get_info.offset;
2081 spin_lock(&nic->put_lock);
2082 put_offset = nic->put_pos[i];
2083 spin_unlock(&nic->put_lock);
2084 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2085 (((get_offset + 1) % ring_bufs) != put_offset)) {
2086 if (rxdp->Control_1 == END_OF_BLOCK) {
2087 rxdp = (RxD_t *) ((unsigned long)
2088 rxdp->Control_2);
2089 get_info.offset++;
2090 get_info.offset %=
2091 (MAX_RXDS_PER_BLOCK + 1);
2092 get_block++;
2093 get_block %= nic->block_count[i];
2094 mac_control->rx_curr_get_info[i].
2095 offset = get_info.offset;
2096 mac_control->rx_curr_get_info[i].
2097 block_index = get_block;
2098 continue;
2099 }
2100 get_offset =
2101 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2102 get_info.offset;
2103 skb = (struct sk_buff *) ((unsigned long)
2104 rxdp->Host_Control);
2105 if (skb == NULL) {
2106 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2107 dev->name);
2108 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2109 return;
2110 }
2111 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2112 val16 = (u16) (val64 >> 48);
2113 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2114 pci_unmap_single(nic->pdev, (dma_addr_t)
2115 rxdp->Buffer0_ptr,
2116 dev->mtu +
2117 HEADER_ETHERNET_II_802_3_SIZE +
2118 HEADER_802_2_SIZE +
2119 HEADER_SNAP_SIZE,
2120 PCI_DMA_FROMDEVICE);
2121 rx_osm_handler(nic, val16, rxdp, i);
2122 get_info.offset++;
2123 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2124 rxdp =
2125 nic->rx_blocks[i][get_block].block_virt_addr +
2126 get_info.offset;
2127 mac_control->rx_curr_get_info[i].offset =
2128 get_info.offset;
2129 pkt_cnt++;
2130 if ((indicate_max_pkts)
2131 && (pkt_cnt > indicate_max_pkts))
2132 break;
2133 }
2134#else
2135 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2136 get_info.offset;
2137 spin_lock(&nic->put_lock);
2138 put_offset = nic->put_pos[i];
2139 spin_unlock(&nic->put_lock);
2140 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2141 !(rxdp->Control_2 & BIT(0))) &&
2142 (((get_offset + 1) % ring_bufs) != put_offset)) {
2143 skb = (struct sk_buff *) ((unsigned long)
2144 rxdp->Host_Control);
2145 if (skb == NULL) {
2146 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2147 dev->name);
2148 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2149 return;
2150 }
2151
2152 pci_unmap_single(nic->pdev, (dma_addr_t)
2153 rxdp->Buffer0_ptr,
2154 BUF0_LEN, PCI_DMA_FROMDEVICE);
2155 pci_unmap_single(nic->pdev, (dma_addr_t)
2156 rxdp->Buffer1_ptr,
2157 BUF1_LEN, PCI_DMA_FROMDEVICE);
2158 pci_unmap_single(nic->pdev, (dma_addr_t)
2159 rxdp->Buffer2_ptr,
2160 dev->mtu + BUF0_LEN + 4,
2161 PCI_DMA_FROMDEVICE);
2162 ba = &nic->ba[i][get_block][get_info.offset];
2163
2164 rx_osm_handler(nic, rxdp, i, ba);
2165
2166 get_info.offset++;
2167 mac_control->rx_curr_get_info[i].offset =
2168 get_info.offset;
2169 rxdp =
2170 nic->rx_blocks[i][get_block].block_virt_addr +
2171 get_info.offset;
2172
2173 if (get_info.offset &&
2174 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2175 get_info.offset = 0;
2176 mac_control->rx_curr_get_info[i].
2177 offset = get_info.offset;
2178 get_block++;
2179 get_block %= nic->block_count[i];
2180 mac_control->rx_curr_get_info[i].
2181 block_index = get_block;
2182 rxdp =
2183 nic->rx_blocks[i][get_block].
2184 block_virt_addr;
2185 }
2186 get_offset =
2187 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2188 get_info.offset;
2189 pkt_cnt++;
2190 if ((indicate_max_pkts)
2191 && (pkt_cnt > indicate_max_pkts))
2192 break;
2193 }
2194#endif
2195 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2196 break;
2197 }
2198}
2199#endif
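/*
 * Illustrative note (not part of the original driver): both Rx handlers
 * above walk the descriptor rings one block at a time. In 1-buffer mode the
 * last RxD of each block is a link descriptor: its Control_1 field holds
 * END_OF_BLOCK and its Control_2 field holds the virtual address of the
 * first RxD of the next block, so following the chain looks roughly like:
 *
 *	if (rxdp->Control_1 == END_OF_BLOCK) {
 *		rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
 *		get_block = (get_block + 1) % nic->block_count[ring];
 *	}
 *
 * where ring is the ring index (called i in the handlers above). In
 * 2-buffer mode the marker lives in Host_Control instead, and the next
 * block is reached through nic->rx_blocks[ring][get_block].block_virt_addr
 * once the offset wraps past MAX_RXDS_PER_BLOCK.
 */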
2200/**
2201 * tx_intr_handler - Transmit interrupt handler
2202 * @nic : device private variable
2203 * Description:
2204 * If an interrupt was raised to indicate DMA completion of the
2205 * Tx packet, this function is called. It identifies the last TxD
2206 * whose buffer was freed and frees all skbs whose data have already been
2207 * DMA'ed into the NIC's internal memory.
2208 * Return Value:
2209 * NONE
2210 */
2211
2212static void tx_intr_handler(struct s2io_nic *nic)
2213{
2214 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2215 struct net_device *dev = (struct net_device *) nic->dev;
2216 tx_curr_get_info_t get_info, put_info;
2217 struct sk_buff *skb;
2218 TxD_t *txdlp;
2219 register u64 val64 = 0;
2220 int i;
2221 u16 j, frg_cnt;
2222 mac_info_t *mac_control;
2223 struct config_param *config;
2224
2225 mac_control = &nic->mac_control;
2226 config = &nic->config;
2227
2228 /*
2229 * tx_traffic_int reg is an R1 register, hence we read and write
2230 * back the same value in the register to clear it.
2231 */
2232 val64 = readq(&bar0->tx_traffic_int);
2233 writeq(val64, &bar0->tx_traffic_int);
2234
2235 for (i = 0; i < config->tx_fifo_num; i++) {
2236 get_info = mac_control->tx_curr_get_info[i];
2237 put_info = mac_control->tx_curr_put_info[i];
2238 txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
2239 list_virt_addr;
2240 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2241 (get_info.offset != put_info.offset) &&
2242 (txdlp->Host_Control)) {
2243 /* Check for TxD errors */
2244 if (txdlp->Control_1 & TXD_T_CODE) {
2245 unsigned long long err;
2246 err = txdlp->Control_1 & TXD_T_CODE;
2247 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2248 err);
2249 }
2250
2251 skb = (struct sk_buff *) ((unsigned long)
2252 txdlp->Host_Control);
2253 if (skb == NULL) {
2254 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2255 dev->name);
2256 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2257 return;
2258 }
2259 nic->tx_pkt_count++;
2260
2261 frg_cnt = skb_shinfo(skb)->nr_frags;
2262
2263 /* For unfragmented skb */
2264 pci_unmap_single(nic->pdev, (dma_addr_t)
2265 txdlp->Buffer_Pointer,
2266 skb->len - skb->data_len,
2267 PCI_DMA_TODEVICE);
2268 if (frg_cnt) {
2269 TxD_t *temp = txdlp;
2270 txdlp++;
2271 for (j = 0; j < frg_cnt; j++, txdlp++) {
2272 skb_frag_t *frag =
2273 &skb_shinfo(skb)->frags[j];
2274 pci_unmap_page(nic->pdev,
2275 (dma_addr_t)
2276 txdlp->
2277 Buffer_Pointer,
2278 frag->size,
2279 PCI_DMA_TODEVICE);
2280 }
2281 txdlp = temp;
2282 }
2283 memset(txdlp, 0,
2284 (sizeof(TxD_t) * config->max_txds));
2285
2286 /* Updating the statistics block */
2287 nic->stats.tx_packets++;
2288 nic->stats.tx_bytes += skb->len;
2289 dev_kfree_skb_irq(skb);
2290
2291 get_info.offset++;
2292 get_info.offset %= get_info.fifo_len + 1;
2293 txdlp = (TxD_t *) nic->list_info[i]
2294 [get_info.offset].list_virt_addr;
2295 mac_control->tx_curr_get_info[i].offset =
2296 get_info.offset;
2297 }
2298 }
2299
2300 spin_lock(&nic->tx_lock);
2301 if (netif_queue_stopped(dev))
2302 netif_wake_queue(dev);
2303 spin_unlock(&nic->tx_lock);
2304}
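/*
 * Illustrative note (not part of the original driver): the Tx FIFO put/get
 * offsets used above and in s2io_xmit() are plain modulo counters over
 * fifo_len + 1 slots, with one slot always left unused so that a full FIFO
 * can be told apart from an empty one:
 *
 *	queue_len = fifo_len + 1;
 *	empty = (get_off == put_off);
 *	full  = (((put_off + 1) % queue_len) == get_off);
 *
 * For example, with fifo_len = 7 (queue_len = 8), put_off = 7 and
 * get_off = 0 means the FIFO is treated as full with 7 TxDLs outstanding.
 */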
2305
2306/**
2307 * alarm_intr_handler - Alarm Interrupt handler
2308 * @nic: device private variable
2309 * Description: If the interrupt was neither because of an Rx packet nor a Tx
2310 * completion, this function is called. If the interrupt was to indicate
2311 * a loss of link, the OSM link status handler is invoked; for any other
2312 * alarm interrupt the block that raised the interrupt is displayed
2313 * and a H/W reset is issued.
2314 * Return Value:
2315 * NONE
2316*/
2317
2318static void alarm_intr_handler(struct s2io_nic *nic)
2319{
2320 struct net_device *dev = (struct net_device *) nic->dev;
2321 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2322 register u64 val64 = 0, err_reg = 0;
2323
2324 /* Handling link status change error Intr */
2325 err_reg = readq(&bar0->mac_rmac_err_reg);
2326 writeq(err_reg, &bar0->mac_rmac_err_reg);
2327 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2328 schedule_work(&nic->set_link_task);
2329 }
2330
2331 /* In case of a serious error, the device will be Reset. */
2332 val64 = readq(&bar0->serr_source);
2333 if (val64 & SERR_SOURCE_ANY) {
2334 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2335 DBG_PRINT(ERR_DBG, "serious error!!\n");
2336 netif_stop_queue(dev);
2337 schedule_work(&nic->rst_timer_task);
2338 }
2339
2340 /*
2341 * Also, as mentioned in the latest Errata sheets, if the PCC_FB_ECC
2342 * Error occurs, the adapter will be recycled by disabling the
2343 * adapter enable bit and enabling it again after the device
2344 * becomes Quiescent.
2345 */
2346 val64 = readq(&bar0->pcc_err_reg);
2347 writeq(val64, &bar0->pcc_err_reg);
2348 if (val64 & PCC_FB_ECC_DB_ERR) {
2349 u64 ac = readq(&bar0->adapter_control);
2350 ac &= ~(ADAPTER_CNTL_EN);
2351 writeq(ac, &bar0->adapter_control);
2352 ac = readq(&bar0->adapter_control);
2353 schedule_work(&nic->set_link_task);
2354 }
2355
2356 /* Other types of interrupts are not being handled now, TODO */
2357}
2358
2359/**
2360 * wait_for_cmd_complete - waits for a command to complete.
2361 * @sp : private member of the device structure, which is a pointer to the
2362 * s2io_nic structure.
2363 * Description: Function that waits for a command that writes into the RMAC
2364 * ADDR DATA registers to complete and returns either success or
2365 * error depending on whether the command completed or not.
2366 * Return value:
2367 * SUCCESS on success and FAILURE on failure.
2368 */
2369
2370static int wait_for_cmd_complete(nic_t * sp)
2371{
2372 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2373 int ret = FAILURE, cnt = 0;
2374 u64 val64;
2375
2376 while (TRUE) {
2377 val64 = readq(&bar0->rmac_addr_cmd_mem);
2378 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2379 ret = SUCCESS;
2380 break;
2381 }
2382 msleep(50);
2383 if (cnt++ > 10)
2384 break;
2385 }
2386
2387 return ret;
2388}
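/*
 * Illustrative note (not part of the original driver): callers first program
 * the RMAC address command registers and then poll for completion with
 * wait_for_cmd_complete(), which gives up after roughly 600 ms (up to a
 * dozen 50 ms sleeps). A typical sequence, as used by s2io_set_mac_addr()
 * below:
 *
 *	writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
 *	       &bar0->rmac_addr_data0_mem);
 *	writeq(RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
 *	       RMAC_ADDR_CMD_MEM_OFFSET(0), &bar0->rmac_addr_cmd_mem);
 *	if (wait_for_cmd_complete(sp))
 *		return FAILURE;
 */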
2389
2390/**
2391 * s2io_reset - Resets the card.
2392 * @sp : private member of the device structure.
2393 * Description: Function to Reset the card. This function then also
2394 * restores the previously saved PCI configuration space registers as
2395 * the card reset also resets the configuration space.
2396 * Return value:
2397 * void.
2398 */
2399
2400static void s2io_reset(nic_t * sp)
2401{
2402 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2403 u64 val64;
2404 u16 subid;
2405
2406 val64 = SW_RESET_ALL;
2407 writeq(val64, &bar0->sw_reset);
2408
2409 /*
2410 * At this stage, if the PCI write is indeed completed, the
2411 * card is reset and so is the PCI Config space of the device.
2412 * So a read cannot be issued at this stage on any of the
2413 * registers to ensure the write into "sw_reset" register
2414 * has gone through.
2415 * Question: Is there any system call that will explicitly force
2416 * all the write commands still pending on the bus to be pushed
2417 * through?
2418 * As of now I am just giving a 250ms delay and hoping that the
2419 * PCI write to sw_reset register is done by this time.
2420 */
2421 msleep(250);
2422
2423 /* Restore the PCI state saved during initialization. */
2424 pci_restore_state(sp->pdev);
2425 s2io_init_pci(sp);
2426
2427 msleep(250);
2428
2429 /* SXE-002: Configure link and activity LED to turn it off */
2430 subid = sp->pdev->subsystem_device;
2431 if ((subid & 0xFF) >= 0x07) {
2432 val64 = readq(&bar0->gpio_control);
2433 val64 |= 0x0000800000000000ULL;
2434 writeq(val64, &bar0->gpio_control);
2435 val64 = 0x0411040400000000ULL;
2436 writeq(val64, (void __iomem *) bar0 + 0x2700);
2437 }
2438
2439 sp->device_enabled_once = FALSE;
2440}
2441
2442/**
2443 * s2io_set_swapper - to set the swapper control on the card
2444 * @sp : private member of the device structure,
2445 * pointer to the s2io_nic structure.
2446 * Description: Function to set the swapper control on the card
2447 * correctly depending on the 'endianness' of the system.
2448 * Return value:
2449 * SUCCESS on success and FAILURE on failure.
2450 */
2451
2452static int s2io_set_swapper(nic_t * sp)
2453{
2454 struct net_device *dev = sp->dev;
2455 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2456 u64 val64, valt, valr;
2457
2458 /*
2459 * Set proper endian settings and verify the same by reading
2460 * the PIF Feed-back register.
2461 */
2462
2463 val64 = readq(&bar0->pif_rd_swapper_fb);
2464 if (val64 != 0x0123456789ABCDEFULL) {
2465 int i = 0;
2466 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2467 0x8100008181000081ULL, /* FE=1, SE=0 */
2468 0x4200004242000042ULL, /* FE=0, SE=1 */
2469 0}; /* FE=0, SE=0 */
2470
2471 while(i<4) {
2472 writeq(value[i], &bar0->swapper_ctrl);
2473 val64 = readq(&bar0->pif_rd_swapper_fb);
2474 if (val64 == 0x0123456789ABCDEFULL)
2475 break;
2476 i++;
2477 }
2478 if (i == 4) {
2479 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2480 dev->name);
2481 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2482 (unsigned long long) val64);
2483 return FAILURE;
2484 }
2485 valr = value[i];
2486 } else {
2487 valr = readq(&bar0->swapper_ctrl);
2488 }
2489
2490 valt = 0x0123456789ABCDEFULL;
2491 writeq(valt, &bar0->xmsi_address);
2492 val64 = readq(&bar0->xmsi_address);
2493
2494 if(val64 != valt) {
2495 int i = 0;
2496 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2497 0x0081810000818100ULL, /* FE=1, SE=0 */
2498 0x0042420000424200ULL, /* FE=0, SE=1 */
2499 0}; /* FE=0, SE=0 */
2500
2501 while(i<4) {
2502 writeq((value[i] | valr), &bar0->swapper_ctrl);
2503 writeq(valt, &bar0->xmsi_address);
2504 val64 = readq(&bar0->xmsi_address);
2505 if(val64 == valt)
2506 break;
2507 i++;
2508 }
2509 if(i == 4) {
2510 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2511 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
2512 return FAILURE;
2513 }
2514 }
2515 val64 = readq(&bar0->swapper_ctrl);
2516 val64 &= 0xFFFF000000000000ULL;
2517
2518#ifdef __BIG_ENDIAN
2519 /*
2520 * The device is set to a big endian format by default, so a
2521 * big endian driver need not set anything.
2522 */
2523 val64 |= (SWAPPER_CTRL_TXP_FE |
2524 SWAPPER_CTRL_TXP_SE |
2525 SWAPPER_CTRL_TXD_R_FE |
2526 SWAPPER_CTRL_TXD_W_FE |
2527 SWAPPER_CTRL_TXF_R_FE |
2528 SWAPPER_CTRL_RXD_R_FE |
2529 SWAPPER_CTRL_RXD_W_FE |
2530 SWAPPER_CTRL_RXF_W_FE |
2531 SWAPPER_CTRL_XMSI_FE |
2532 SWAPPER_CTRL_XMSI_SE |
2533 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2534 writeq(val64, &bar0->swapper_ctrl);
2535#else
2536 /*
2537 * Initially we enable all bits to make it accessible by the
2538 * driver, then we selectively enable only those bits that
2539 * we want to set.
2540 */
2541 val64 |= (SWAPPER_CTRL_TXP_FE |
2542 SWAPPER_CTRL_TXP_SE |
2543 SWAPPER_CTRL_TXD_R_FE |
2544 SWAPPER_CTRL_TXD_R_SE |
2545 SWAPPER_CTRL_TXD_W_FE |
2546 SWAPPER_CTRL_TXD_W_SE |
2547 SWAPPER_CTRL_TXF_R_FE |
2548 SWAPPER_CTRL_RXD_R_FE |
2549 SWAPPER_CTRL_RXD_R_SE |
2550 SWAPPER_CTRL_RXD_W_FE |
2551 SWAPPER_CTRL_RXD_W_SE |
2552 SWAPPER_CTRL_RXF_W_FE |
2553 SWAPPER_CTRL_XMSI_FE |
2554 SWAPPER_CTRL_XMSI_SE |
2555 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2556 writeq(val64, &bar0->swapper_ctrl);
2557#endif
2558 val64 = readq(&bar0->swapper_ctrl);
2559
2560 /*
2561 * Verifying if endian settings are accurate by reading a
2562 * feedback register.
2563 */
2564 val64 = readq(&bar0->pif_rd_swapper_fb);
2565 if (val64 != 0x0123456789ABCDEFULL) {
2566 /* Endian settings are incorrect, calls for another dekko. */
2567 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2568 dev->name);
2569 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2570 (unsigned long long) val64);
2571 return FAILURE;
2572 }
2573
2574 return SUCCESS;
2575}
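/*
 * Illustrative note (not part of the original driver): the PIF feed-back
 * register always returns the fixed pattern 0x0123456789ABCDEF when reads
 * are being byte-ordered correctly, so a mismatch shows how the current
 * swapper setting mangles reads (a fully byte-swapped read, for instance,
 * would come back as 0xEFCDAB8967452301). That is why the loops above simply
 * try each swapper_ctrl candidate until the pattern reads back intact:
 *
 *	writeq(candidate, &bar0->swapper_ctrl);
 *	if (readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL)
 *		break;	// this candidate makes PIF reads come out right
 */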
2576
2577/* ********************************************************* *
2578 * Functions defined below concern the OS part of the driver *
2579 * ********************************************************* */
2580
2581/**
2582 * s2io_open - open entry point of the driver
2583 * @dev : pointer to the device structure.
2584 * Description:
2585 * This function is the open entry point of the driver. It mainly calls a
2586 * function to allocate Rx buffers and inserts them into the buffer
2587 * descriptors and then enables the Rx part of the NIC.
2588 * Return value:
2589 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2590 * file on failure.
2591 */
2592
2593static int s2io_open(struct net_device *dev)
2594{
2595 nic_t *sp = dev->priv;
2596 int err = 0;
2597
2598 /*
2599 * Make sure the link is off by default every time the
2600 * NIC is initialized.
2601 */
2602 netif_carrier_off(dev);
2603 sp->last_link_state = LINK_DOWN;
2604
2605 /* Initialize H/W and enable interrupts */
2606 if (s2io_card_up(sp)) {
2607 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2608 dev->name);
2609 return -ENODEV;
2610 }
2611
2612 /* After proper initialization of H/W, register ISR */
2613 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
2614 sp->name, dev);
2615 if (err) {
2616 s2io_reset(sp);
2617 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2618 dev->name);
2619 return err;
2620 }
2621
2622 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2623 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2624 s2io_reset(sp);
2625 return -ENODEV;
2626 }
2627
2628 netif_start_queue(dev);
2629 return 0;
2630}
2631
2632/**
2633 * s2io_close -close entry point of the driver
2634 * @dev : device pointer.
2635 * Description:
2636 * This is the stop entry point of the driver. It needs to undo exactly
2637 * whatever was done by the open entry point, thus it's usually referred to
2638 * as the close function. Among other things, this function mainly stops the
2639 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2640 * Return value:
2641 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2642 * file on failure.
2643 */
2644
2645static int s2io_close(struct net_device *dev)
2646{
2647 nic_t *sp = dev->priv;
2648
2649 flush_scheduled_work();
2650 netif_stop_queue(dev);
2651 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2652 s2io_card_down(sp);
2653
2654 free_irq(dev->irq, dev);
2655 sp->device_close_flag = TRUE; /* Device is shut down. */
2656 return 0;
2657}
2658
2659/**
2660 * s2io_xmit - Tx entry point of the driver
2661 * @skb : the socket buffer containing the Tx data.
2662 * @dev : device pointer.
2663 * Description :
2664 * This function is the Tx entry point of the driver. S2IO NIC supports
2665 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2666 * NOTE: when the device can't queue the pkt, just the trans_start variable
2667 * will not be updated.
2668 * Return value:
2669 * 0 on success & 1 on failure.
2670 */
2671
2672static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2673{
2674 nic_t *sp = dev->priv;
2675 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2676 register u64 val64;
2677 TxD_t *txdp;
2678 TxFIFO_element_t __iomem *tx_fifo;
2679 unsigned long flags;
2680#ifdef NETIF_F_TSO
2681 int mss;
2682#endif
2683 mac_info_t *mac_control;
2684 struct config_param *config;
2685 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2686
2687 mac_control = &sp->mac_control;
2688 config = &sp->config;
2689
2690 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
2691 spin_lock_irqsave(&sp->tx_lock, flags);
2692
2693 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2694 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
2695 dev->name);
2696 spin_unlock_irqrestore(&sp->tx_lock, flags);
2697 return 1;
2698 }
2699
2700 queue = 0;
2701 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2702 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2703 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2704
2705 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
2706 /* Avoid "put" pointer going beyond "get" pointer */
2707 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2708 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2709 netif_stop_queue(dev);
2710 dev_kfree_skb(skb);
2711 spin_unlock_irqrestore(&sp->tx_lock, flags);
2712 return 0;
2713 }
2714#ifdef NETIF_F_TSO
2715 mss = skb_shinfo(skb)->tso_size;
2716 if (mss) {
2717 txdp->Control_1 |= TXD_TCP_LSO_EN;
2718 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2719 }
2720#endif
2721
2722 frg_cnt = skb_shinfo(skb)->nr_frags;
2723 frg_len = skb->len - skb->data_len;
2724
2725 txdp->Host_Control = (unsigned long) skb;
2726 txdp->Buffer_Pointer = pci_map_single
2727 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2728 if (skb->ip_summed == CHECKSUM_HW) {
2729 txdp->Control_2 |=
2730 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2731 TXD_TX_CKO_UDP_EN);
2732 }
2733
2734 txdp->Control_2 |= config->tx_intr_type;
2735
2736 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2737 TXD_GATHER_CODE_FIRST);
2738 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2739
2740 /* For fragmented SKB. */
2741 for (i = 0; i < frg_cnt; i++) {
2742 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2743 txdp++;
2744 txdp->Buffer_Pointer = (u64) pci_map_page
2745 (sp->pdev, frag->page, frag->page_offset,
2746 frag->size, PCI_DMA_TODEVICE);
2747 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2748 }
2749 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2750
2751 tx_fifo = mac_control->tx_FIFO_start[queue];
2752 val64 = sp->list_info[queue][put_off].list_phy_addr;
2753 writeq(val64, &tx_fifo->TxDL_Pointer);
2754
2755 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2756 TX_FIFO_LAST_LIST);
2757#ifdef NETIF_F_TSO
2758 if (mss)
2759 val64 |= TX_FIFO_SPECIAL_FUNC;
2760#endif
2761 writeq(val64, &tx_fifo->List_Control);
2762
2763 /* Perform a PCI read to flush previous writes */
2764 val64 = readq(&bar0->general_int_status);
2765
2766 put_off++;
2767 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
2768 mac_control->tx_curr_put_info[queue].offset = put_off;
2769
2770 /* Avoid "put" pointer going beyond "get" pointer */
2771 if (((put_off + 1) % queue_len) == get_off) {
2772 DBG_PRINT(TX_DBG,
2773 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2774 put_off, get_off);
2775 netif_stop_queue(dev);
2776 }
2777
2778 dev->trans_start = jiffies;
2779 spin_unlock_irqrestore(&sp->tx_lock, flags);
2780
2781 return 0;
2782}
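/*
 * Illustrative note (not part of the original driver): for a fragmented skb,
 * s2io_xmit() builds one TxD per buffer inside a single TxDL, roughly:
 *
 *	TxD[0]             linear data (skb->len - skb->data_len bytes),
 *	                   TXD_GATHER_CODE_FIRST, Host_Control = skb,
 *	                   TXD_LIST_OWN_XENA to hand the list to the NIC
 *	TxD[1..nr_frags]   one page fragment each, mapped with pci_map_page()
 *	TxD[nr_frags]      additionally carries TXD_GATHER_CODE_LAST
 *
 * The TxDL's physical address is then written to the FIFO's TxDL_Pointer
 * register and the transfer is kicked off through List_Control.
 */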
2783
2784/**
2785 * s2io_isr - ISR handler of the device .
2786 * @irq: the irq of the device.
2787 * @dev_id: a void pointer to the dev structure of the NIC.
2788 * @regs: pointer to the registers pushed on the stack.
2789 * Description: This function is the ISR handler of the device. It
2790 * identifies the reason for the interrupt and calls the relevant
2791 * service routines. As a contingency measure, this ISR allocates the
2792 * recv buffers, if their number is below the panic value, which is
2793 * presently set to 25% of the original number of rcv buffers allocated.
2794 * Return value:
2795 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2796 * IRQ_NONE: will be returned if interrupt is not from our device
2797 */
2798static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2799{
2800 struct net_device *dev = (struct net_device *) dev_id;
2801 nic_t *sp = dev->priv;
2802 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2803#ifndef CONFIG_S2IO_NAPI
2804 int i, ret;
2805#endif
2806 u64 reason = 0;
2807 mac_info_t *mac_control;
2808 struct config_param *config;
2809
2810 mac_control = &sp->mac_control;
2811 config = &sp->config;
2812
2813 /*
2814 * Identify the cause for interrupt and call the appropriate
2815 * interrupt handler. Causes for the interrupt could be;
2816 * 1. Rx of packet.
2817 * 2. Tx complete.
2818 * 3. Link down.
2819 * 4. Error in any functional blocks of the NIC.
2820 */
2821 reason = readq(&bar0->general_int_status);
2822
2823 if (!reason) {
2824 /* The interrupt was not raised by Xena. */
2825 return IRQ_NONE;
2826 }
2827
2828 /* If Intr is because of Tx Traffic */
2829 if (reason & GEN_INTR_TXTRAFFIC) {
2830 tx_intr_handler(sp);
2831 }
2832
2833 /* If Intr is because of an error */
2834 if (reason & (GEN_ERROR_INTR))
2835 alarm_intr_handler(sp);
2836
2837#ifdef CONFIG_S2IO_NAPI
2838 if (reason & GEN_INTR_RXTRAFFIC) {
2839 if (netif_rx_schedule_prep(dev)) {
2840 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2841 DISABLE_INTRS);
2842 __netif_rx_schedule(dev);
2843 }
2844 }
2845#else
2846 /* If Intr is because of Rx Traffic */
2847 if (reason & GEN_INTR_RXTRAFFIC) {
2848 rx_intr_handler(sp);
2849 }
2850#endif
2851
2852 /*
2853 * If the Rx buffer count is below the panic threshold then
2854 * reallocate the buffers from the interrupt handler itself,
2855 * else schedule a tasklet to reallocate the buffers.
2856 */
2857#ifndef CONFIG_S2IO_NAPI
2858 for (i = 0; i < config->rx_ring_num; i++) {
2859 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2860 int level = rx_buffer_level(sp, rxb_size, i);
2861
2862 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2863 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2864 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2865 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2866 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2867 dev->name);
2868 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2869 clear_bit(0, (&sp->tasklet_status));
2870 return IRQ_HANDLED;
2871 }
2872 clear_bit(0, (&sp->tasklet_status));
2873 } else if (level == LOW) {
2874 tasklet_schedule(&sp->task);
2875 }
2876 }
2877#endif
2878
2879 return IRQ_HANDLED;
2880}
2881
2882/**
2883 * s2io_get_stats - Updates the device statistics structure.
2884 * @dev : pointer to the device structure.
2885 * Description:
2886 * This function updates the device statistics structure in the s2io_nic
2887 * structure and returns a pointer to the same.
2888 * Return value:
2889 * pointer to the updated net_device_stats structure.
2890 */
2891
2892static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2893{
2894 nic_t *sp = dev->priv;
2895 mac_info_t *mac_control;
2896 struct config_param *config;
2897
2898 mac_control = &sp->mac_control;
2899 config = &sp->config;
2900
2901 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
2902 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
2903 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
2904 sp->stats.rx_length_errors =
2905 mac_control->stats_info->rmac_long_frms;
2906
2907 return (&sp->stats);
2908}
2909
2910/**
2911 * s2io_set_multicast - entry point for multicast address enable/disable.
2912 * @dev : pointer to the device structure
2913 * Description:
2914 * This function is a driver entry point which gets called by the kernel
2915 * whenever multicast addresses must be enabled/disabled. This also gets
2916 * called to set/reset promiscuous mode. Depending on the device flags, we
2917 * determine if multicast addresses must be enabled or if promiscuous mode
2918 * is to be disabled, etc.
2919 * Return value:
2920 * void.
2921 */
2922
2923static void s2io_set_multicast(struct net_device *dev)
2924{
2925 int i, j, prev_cnt;
2926 struct dev_mc_list *mclist;
2927 nic_t *sp = dev->priv;
2928 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2929 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2930 0xfeffffffffffULL;
2931 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
2932 void __iomem *add;
2933
2934 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2935 /* Enable all Multicast addresses */
2936 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2937 &bar0->rmac_addr_data0_mem);
2938 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2939 &bar0->rmac_addr_data1_mem);
2940 val64 = RMAC_ADDR_CMD_MEM_WE |
2941 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2942 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2943 writeq(val64, &bar0->rmac_addr_cmd_mem);
2944 /* Wait till command completes */
2945 wait_for_cmd_complete(sp);
2946
2947 sp->m_cast_flg = 1;
2948 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2949 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2950 /* Disable all Multicast addresses */
2951 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2952 &bar0->rmac_addr_data0_mem);
2953 val64 = RMAC_ADDR_CMD_MEM_WE |
2954 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2955 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2956 writeq(val64, &bar0->rmac_addr_cmd_mem);
2957 /* Wait till command completes */
2958 wait_for_cmd_complete(sp);
2959
2960 sp->m_cast_flg = 0;
2961 sp->all_multi_pos = 0;
2962 }
2963
2964 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2965 /* Put the NIC into promiscuous mode */
2966 add = &bar0->mac_cfg;
2967 val64 = readq(&bar0->mac_cfg);
2968 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
2969
2970 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2971 writel((u32) val64, add);
2972 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2973 writel((u32) (val64 >> 32), (add + 4));
2974
2975 val64 = readq(&bar0->mac_cfg);
2976 sp->promisc_flg = 1;
2977 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2978 dev->name);
2979 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2980 /* Remove the NIC from promiscuous mode */
2981 add = &bar0->mac_cfg;
2982 val64 = readq(&bar0->mac_cfg);
2983 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2984
2985 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2986 writel((u32) val64, add);
2987 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2988 writel((u32) (val64 >> 32), (add + 4));
2989
2990 val64 = readq(&bar0->mac_cfg);
2991 sp->promisc_flg = 0;
2992 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
2993 dev->name);
2994 }
2995
2996 /* Update individual M_CAST address list */
2997 if ((!sp->m_cast_flg) && dev->mc_count) {
2998 if (dev->mc_count >
2999 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3000 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3001 dev->name);
3002 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3003 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3004 return;
3005 }
3006
3007 prev_cnt = sp->mc_addr_count;
3008 sp->mc_addr_count = dev->mc_count;
3009
3010 /* Clear out the previous list of Mc in the H/W. */
3011 for (i = 0; i < prev_cnt; i++) {
3012 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3013 &bar0->rmac_addr_data0_mem);
3014 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3015 &bar0->rmac_addr_data1_mem);
3016 val64 = RMAC_ADDR_CMD_MEM_WE |
3017 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3018 RMAC_ADDR_CMD_MEM_OFFSET
3019 (MAC_MC_ADDR_START_OFFSET + i);
3020 writeq(val64, &bar0->rmac_addr_cmd_mem);
3021
3022 /* Wait till command completes */
3023 if (wait_for_cmd_complete(sp)) {
3024 DBG_PRINT(ERR_DBG, "%s: Adding ",
3025 dev->name);
3026 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3027 return;
3028 }
3029 }
3030
3031 /* Create the new Rx filter list and update the same in H/W. */
3032 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3033 i++, mclist = mclist->next) {
3034 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3035 ETH_ALEN);
3036 for (j = 0; j < ETH_ALEN; j++) {
3037 mac_addr |= mclist->dmi_addr[j];
3038 mac_addr <<= 8;
3039 }
3040 mac_addr >>= 8;
3041 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3042 &bar0->rmac_addr_data0_mem);
3043 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3044 &bar0->rmac_addr_data1_mem);
3045
3046 val64 = RMAC_ADDR_CMD_MEM_WE |
3047 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3048 RMAC_ADDR_CMD_MEM_OFFSET
3049 (i + MAC_MC_ADDR_START_OFFSET);
3050 writeq(val64, &bar0->rmac_addr_cmd_mem);
3051
3052 /* Wait till command completes */
3053 if (wait_for_cmd_complete(sp)) {
3054 DBG_PRINT(ERR_DBG, "%s: Adding ",
3055 dev->name);
3056 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3057 return;
3058 }
3059 }
3060 }
3061}
3062
3063/**
3064 * s2io_set_mac_addr - Programs the Xframe mac address
3065 * @dev : pointer to the device structure.
3066 * @addr: a uchar pointer to the new mac address which is to be set.
3067 * Description : This procedure will program the Xframe to receive
3068 * frames with new Mac Address
3069 * Return value: SUCCESS on success and an appropriate (-)ve integer
3070 * as defined in errno.h file on failure.
3071 */
3072
3073int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3074{
3075 nic_t *sp = dev->priv;
3076 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3077 register u64 val64, mac_addr = 0;
3078 int i;
3079
3080 /*
3081 * Set the new MAC address as the new unicast filter and reflect this
3082 * change on the device address registered with the OS. It will be
3083 * at offset 0.
3084 */
3085 for (i = 0; i < ETH_ALEN; i++) {
3086 mac_addr <<= 8;
3087 mac_addr |= addr[i];
3088 }
3089
3090 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3091 &bar0->rmac_addr_data0_mem);
3092
3093 val64 =
3094 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3095 RMAC_ADDR_CMD_MEM_OFFSET(0);
3096 writeq(val64, &bar0->rmac_addr_cmd_mem);
3097 /* Wait till command completes */
3098 if (wait_for_cmd_complete(sp)) {
3099 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3100 return FAILURE;
3101 }
3102
3103 return SUCCESS;
3104}
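/*
 * Illustrative note (not part of the original driver): the loop above packs
 * the 6-byte MAC address into the low 48 bits of a u64, most significant
 * byte first, before handing it to RMAC_ADDR_DATA0_MEM_ADDR(). For example,
 * 00:11:22:33:44:55 ends up as mac_addr == 0x001122334455ULL:
 *
 *	mac_addr = 0;
 *	for (i = 0; i < ETH_ALEN; i++) {
 *		mac_addr <<= 8;
 *		mac_addr |= addr[i];
 *	}
 */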
3105
3106/**
3107 * s2io_ethtool_sset - Sets different link parameters.
3108 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3109 * @info: pointer to the structure with parameters given by ethtool to set
3110 * link information.
3111 * Description:
3112 * The function sets different link parameters provided by the user onto
3113 * the NIC.
3114 * Return value:
3115 * 0 on success.
3116*/
3117
3118static int s2io_ethtool_sset(struct net_device *dev,
3119 struct ethtool_cmd *info)
3120{
3121 nic_t *sp = dev->priv;
3122 if ((info->autoneg == AUTONEG_ENABLE) ||
3123 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3124 return -EINVAL;
3125 else {
3126 s2io_close(sp->dev);
3127 s2io_open(sp->dev);
3128 }
3129
3130 return 0;
3131}
3132
3133/**
3134 * s2io_ethtool_gset - Return link specific information.
3135 * @sp : private member of the device structure, pointer to the
3136 * s2io_nic structure.
3137 * @info : pointer to the structure with parameters given by ethtool
3138 * to return link information.
3139 * Description:
3140 * Returns link specific information like speed, duplex etc.. to ethtool.
3141 * Return value :
3142 * return 0 on success.
3143 */
3144
3145static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3146{
3147 nic_t *sp = dev->priv;
3148 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3149 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3150 info->port = PORT_FIBRE;
3151 /* info->transceiver?? TODO */
3152
3153 if (netif_carrier_ok(sp->dev)) {
3154 info->speed = 10000;
3155 info->duplex = DUPLEX_FULL;
3156 } else {
3157 info->speed = -1;
3158 info->duplex = -1;
3159 }
3160
3161 info->autoneg = AUTONEG_DISABLE;
3162 return 0;
3163}
3164
3165/**
3166 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3167 * @sp : private member of the device structure, which is a pointer to the
3168 * s2io_nic structure.
3169 * @info : pointer to the structure with parameters given by ethtool to
3170 * return driver information.
3171 * Description:
3172 * Returns driver specific information like name, version, etc. to ethtool.
3173 * Return value:
3174 * void
3175 */
3176
3177static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3178 struct ethtool_drvinfo *info)
3179{
3180 nic_t *sp = dev->priv;
3181
3182 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3183 strncpy(info->version, s2io_driver_version,
3184 sizeof(s2io_driver_version));
3185 strncpy(info->fw_version, "", 32);
3186 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3187 info->regdump_len = XENA_REG_SPACE;
3188 info->eedump_len = XENA_EEPROM_SPACE;
3189 info->testinfo_len = S2IO_TEST_LEN;
3190 info->n_stats = S2IO_STAT_LEN;
3191}
3192
3193/**
3194 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3195 * @sp: private member of the device structure, which is a pointer to the
3196 * s2io_nic structure.
3197 * @regs : pointer to the structure with parameters given by ethtool for
3198 * dumping the registers.
3199 * @space: The input argument into which all the registers are dumped.
3200 * Description:
3201 * Dumps the entire register space of xFrame NIC into the user given
3202 * buffer area.
3203 * Return value :
3204 * void .
3205*/
3206
3207static void s2io_ethtool_gregs(struct net_device *dev,
3208 struct ethtool_regs *regs, void *space)
3209{
3210 int i;
3211 u64 reg;
3212 u8 *reg_space = (u8 *) space;
3213 nic_t *sp = dev->priv;
3214
3215 regs->len = XENA_REG_SPACE;
3216 regs->version = sp->pdev->subsystem_device;
3217
3218 for (i = 0; i < regs->len; i += 8) {
3219 reg = readq(sp->bar0 + i);
3220 memcpy((reg_space + i), &reg, 8);
3221 }
3222}
3223
3224/**
3225 * s2io_phy_id - timer function that alternates adapter LED.
3226 * @data : address of the private member of the device structure, which
3227 * is a pointer to the s2io_nic structure, provided as an u32.
3228 * Description: This is actually the timer function that alternates the
3229 * adapter LED bit of the adapter control register to set/reset on every
3230 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3231 * once every second.
3232*/
3233static void s2io_phy_id(unsigned long data)
3234{
3235 nic_t *sp = (nic_t *) data;
3236 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3237 u64 val64 = 0;
3238 u16 subid;
3239
3240 subid = sp->pdev->subsystem_device;
3241 if ((subid & 0xFF) >= 0x07) {
3242 val64 = readq(&bar0->gpio_control);
3243 val64 ^= GPIO_CTRL_GPIO_0;
3244 writeq(val64, &bar0->gpio_control);
3245 } else {
3246 val64 = readq(&bar0->adapter_control);
3247 val64 ^= ADAPTER_LED_ON;
3248 writeq(val64, &bar0->adapter_control);
3249 }
3250
3251 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3252}
3253
3254/**
3255 * s2io_ethtool_idnic - To physically identify the nic on the system.
3256 * @sp : private member of the device structure, which is a pointer to the
3257 * s2io_nic structure.
3258 * @id : pointer to the structure with identification parameters given by
3259 * ethtool.
3260 * Description: Used to physically identify the NIC on the system.
3261 * The Link LED will blink for a time specified by the user for
3262 * identification.
3263 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3264 * identification is possible only if its link is up.
3265 * Return value:
3266 * int , returns 0 on success
3267 */
3268
3269static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3270{
3271 u64 val64 = 0, last_gpio_ctrl_val;
3272 nic_t *sp = dev->priv;
3273 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3274 u16 subid;
3275
3276 subid = sp->pdev->subsystem_device;
3277 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3278 if ((subid & 0xFF) < 0x07) {
3279 val64 = readq(&bar0->adapter_control);
3280 if (!(val64 & ADAPTER_CNTL_EN)) {
3281 printk(KERN_ERR
3282 "Adapter Link down, cannot blink LED\n");
3283 return -EFAULT;
3284 }
3285 }
3286 if (sp->id_timer.function == NULL) {
3287 init_timer(&sp->id_timer);
3288 sp->id_timer.function = s2io_phy_id;
3289 sp->id_timer.data = (unsigned long) sp;
3290 }
3291 mod_timer(&sp->id_timer, jiffies);
3292 if (data)
3293 msleep(data * 1000);
3294 else
3295 msleep(0xFFFFFFFF);
3296 del_timer_sync(&sp->id_timer);
3297
3298 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3299 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3300 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3301 }
3302
3303 return 0;
3304}
3305
3306/**
3307 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3308 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3309 * @ep : pointer to the structure with pause parameters given by ethtool.
3310 * Description:
3311 * Returns the Pause frame generation and reception capability of the NIC.
3312 * Return value:
3313 * void
3314 */
3315static void s2io_ethtool_getpause_data(struct net_device *dev,
3316 struct ethtool_pauseparam *ep)
3317{
3318 u64 val64;
3319 nic_t *sp = dev->priv;
3320 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3321
3322 val64 = readq(&bar0->rmac_pause_cfg);
3323 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3324 ep->tx_pause = TRUE;
3325 if (val64 & RMAC_PAUSE_RX_ENABLE)
3326 ep->rx_pause = TRUE;
3327 ep->autoneg = FALSE;
3328}
3329
3330/**
3331 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3332 * @sp : private member of the device structure, which is a pointer to the
3333 * s2io_nic structure.
3334 * @ep : pointer to the structure with pause parameters given by ethtool.
3335 * Description:
3336 * It can be used to set or reset Pause frame generation or reception
3337 * support of the NIC.
3338 * Return value:
3339 * int, returns 0 on Success
3340 */
3341
3342static int s2io_ethtool_setpause_data(struct net_device *dev,
3343 struct ethtool_pauseparam *ep)
3344{
3345 u64 val64;
3346 nic_t *sp = dev->priv;
3347 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3348
3349 val64 = readq(&bar0->rmac_pause_cfg);
3350 if (ep->tx_pause)
3351 val64 |= RMAC_PAUSE_GEN_ENABLE;
3352 else
3353 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3354 if (ep->rx_pause)
3355 val64 |= RMAC_PAUSE_RX_ENABLE;
3356 else
3357 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3358 writeq(val64, &bar0->rmac_pause_cfg);
3359 return 0;
3360}
3361
3362/**
3363 * read_eeprom - reads 4 bytes of data from user given offset.
3364 * @sp : private member of the device structure, which is a pointer to the
3365 * s2io_nic structure.
3366 * @off : offset from which the data is to be read
3367 * @data : It's an output parameter where the data read at the given
3368 * offset is stored.
3369 * Description:
3370 * Will read 4 bytes of data from the user given offset and return the
3371 * read data.
3372 * NOTE: Will allow reading only the part of the EEPROM visible through the
3373 * I2C bus.
3374 * Return value:
3375 * -1 on failure and 0 on success.
3376 */
3377
3378#define S2IO_DEV_ID 5
3379static int read_eeprom(nic_t * sp, int off, u32 * data)
3380{
3381 int ret = -1;
3382 u32 exit_cnt = 0;
3383 u64 val64;
3384 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3385
3386 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3387 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3388 I2C_CONTROL_CNTL_START;
3389 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3390
3391 while (exit_cnt < 5) {
3392 val64 = readq(&bar0->i2c_control);
3393 if (I2C_CONTROL_CNTL_END(val64)) {
3394 *data = I2C_CONTROL_GET_DATA(val64);
3395 ret = 0;
3396 break;
3397 }
3398 msleep(50);
3399 exit_cnt++;
3400 }
3401
3402 return ret;
3403}
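/*
 * Illustrative note (not part of the original driver): an EEPROM read is
 * started by composing a single command word in i2c_control and is complete
 * once I2C_CONTROL_CNTL_END() is set, at which point the data is extracted
 * with I2C_CONTROL_GET_DATA(). A caller fetching one 32-bit word, as
 * s2io_ethtool_geeprom() below does every 4 bytes, looks roughly like:
 *
 *	u32 word;
 *	if (read_eeprom(sp, offset, &word))
 *		return -EFAULT;	// no answer within ~250 ms (5 polls of 50 ms)
 */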
3404
3405/**
3406 * write_eeprom - actually writes the relevant part of the data value.
3407 * @sp : private member of the device structure, which is a pointer to the
3408 * s2io_nic structure.
3409 * @off : offset at which the data must be written
3410 * @data : The data that is to be written
3411 * @cnt : Number of bytes of the data that are actually to be written into
3412 * the Eeprom. (max of 3)
3413 * Description:
3414 * Actually writes the relevant part of the data value into the Eeprom
3415 * through the I2C bus.
3416 * Return value:
3417 * 0 on success, -1 on failure.
3418 */
3419
3420static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3421{
3422 int exit_cnt = 0, ret = -1;
3423 u64 val64;
3424 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3425
3426 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3427 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3428 I2C_CONTROL_CNTL_START;
3429 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3430
3431 while (exit_cnt < 5) {
3432 val64 = readq(&bar0->i2c_control);
3433 if (I2C_CONTROL_CNTL_END(val64)) {
3434 if (!(val64 & I2C_CONTROL_NACK))
3435 ret = 0;
3436 break;
3437 }
3438 msleep(50);
3439 exit_cnt++;
3440 }
3441
3442 return ret;
3443}
3444
3445/**
3446 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3447 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3448 * @eeprom : pointer to the user level structure provided by ethtool,
3449 * containing all relevant information.
3450 * @data_buf : buffer into which the values read from the Eeprom are stored.
3451 * Description: Reads the values stored in the Eeprom at given offset
3452 * for a given length. Stores these values in the input argument data
3453 * buffer 'data_buf' and returns these to the caller (ethtool).
3454 * Return value:
3455 * int 0 on success
3456 */
3457
3458static int s2io_ethtool_geeprom(struct net_device *dev,
3459 struct ethtool_eeprom *eeprom, u8 * data_buf)
3460{
3461 u32 data, i, valid;
3462 nic_t *sp = dev->priv;
3463
3464 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3465
3466 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3467 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3468
3469 for (i = 0; i < eeprom->len; i += 4) {
3470 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3471 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3472 return -EFAULT;
3473 }
3474 valid = INV(data);
3475 memcpy((data_buf + i), &valid, 4);
3476 }
3477 return 0;
3478}
3479
3480/**
3481 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3482 * @sp : private member of the device structure, which is a pointer to the
3483 * s2io_nic structure.
3484 * @eeprom : pointer to the user level structure provided by ethtool,
3485 * containing all relevant information.
3486 * @data_buf : user defined value to be written into Eeprom.
3487 * Description:
3488 * Tries to write the user provided value in the Eeprom, at the offset
3489 * given by the user.
3490 * Return value:
3491 * 0 on success, -EFAULT on failure.
3492 */
3493
3494static int s2io_ethtool_seeprom(struct net_device *dev,
3495 struct ethtool_eeprom *eeprom,
3496 u8 * data_buf)
3497{
3498 int len = eeprom->len, cnt = 0;
3499 u32 valid = 0, data;
3500 nic_t *sp = dev->priv;
3501
3502 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3503 DBG_PRINT(ERR_DBG,
3504 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3505 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3506 eeprom->magic);
3507 return -EFAULT;
3508 }
3509
3510 while (len) {
3511 data = (u32) data_buf[cnt] & 0x000000FF;
3512 if (data) {
3513 valid = (u32) (data << 24);
3514 } else
3515 valid = data;
3516
3517 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3518 DBG_PRINT(ERR_DBG,
3519 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3520 DBG_PRINT(ERR_DBG,
3521 "write into the specified offset\n");
3522 return -EFAULT;
3523 }
3524 cnt++;
3525 len--;
3526 }
3527
3528 return 0;
3529}
3530
3531/**
3532 * s2io_register_test - reads and writes into all clock domains.
3533 * @sp : private member of the device structure, which is a pointer to the
3534 * s2io_nic structure.
3535 * @data : variable that returns the result of each of the tests conducted
3536 * by the driver.
3537 * Description:
3538 * Read and write into all clock domains. The NIC has 3 clock domains,
3539 * so the test checks that registers in all the three regions are accessible.
3540 * Return value:
3541 * 0 on success.
3542 */
3543
3544static int s2io_register_test(nic_t * sp, uint64_t * data)
3545{
3546 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3547 u64 val64 = 0;
3548 int fail = 0;
3549
3550 val64 = readq(&bar0->pcc_enable);
3551 if (val64 != 0xff00000000000000ULL) {
3552 fail = 1;
3553 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3554 }
3555
3556 val64 = readq(&bar0->rmac_pause_cfg);
3557 if (val64 != 0xc000ffff00000000ULL) {
3558 fail = 1;
3559 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3560 }
3561
3562 val64 = readq(&bar0->rx_queue_cfg);
3563 if (val64 != 0x0808080808080808ULL) {
3564 fail = 1;
3565 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3566 }
3567
3568 val64 = readq(&bar0->xgxs_efifo_cfg);
3569 if (val64 != 0x000000001923141EULL) {
3570 fail = 1;
3571 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3572 }
3573
3574 val64 = 0x5A5A5A5A5A5A5A5AULL;
3575 writeq(val64, &bar0->xmsi_data);
3576 val64 = readq(&bar0->xmsi_data);
3577 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3578 fail = 1;
3579 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3580 }
3581
3582 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3583 writeq(val64, &bar0->xmsi_data);
3584 val64 = readq(&bar0->xmsi_data);
3585 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3586 fail = 1;
3587 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3588 }
3589
3590 *data = fail;
3591 return 0;
3592}
3593
3594/**
3595 * s2io_eeprom_test - to verify that the EEPROM in the xena can be programmed.
3596 * @sp : private member of the device structure, which is a pointer to the
3597 * s2io_nic structure.
3598 * @data: variable that returns the result of each of the tests conducted by
3599 * the driver.
3600 * Description:
3601 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3602 * register.
3603 * Return value:
3604 * 0 on success.
3605 */
3606
3607static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3608{
3609 int fail = 0;
3610 u32 ret_data;
3611
3612 /* Test Write Error at offset 0 */
3613 if (!write_eeprom(sp, 0, 0, 3))
3614 fail = 1;
3615
3616 /* Test Write at offset 4f0 */
3617 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3618 fail = 1;
3619 if (read_eeprom(sp, 0x4F0, &ret_data))
3620 fail = 1;
3621
3622 if (ret_data != 0x01234567)
3623 fail = 1;
3624
3625 /* Reset the EEPROM data to 0xFFFFFFFF */
3626 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3627
3628 /* Test Write Request Error at offset 0x7c */
3629 if (!write_eeprom(sp, 0x07C, 0, 3))
3630 fail = 1;
3631
3632 /* Test Write Request at offset 0x7fc */
3633 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3634 fail = 1;
3635 if (read_eeprom(sp, 0x7FC, &ret_data))
3636 fail = 1;
3637
3638 if (ret_data != 0x01234567)
3639 fail = 1;
3640
3641 /* Reset the EEPROM data to 0xFFFFFFFF */
3642 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3643
3644 /* Test Write Error at offset 0x80 */
3645 if (!write_eeprom(sp, 0x080, 0, 3))
3646 fail = 1;
3647
3648 /* Test Write Error at offset 0xfc */
3649 if (!write_eeprom(sp, 0x0FC, 0, 3))
3650 fail = 1;
3651
3652 /* Test Write Error at offset 0x100 */
3653 if (!write_eeprom(sp, 0x100, 0, 3))
3654 fail = 1;
3655
3656 /* Test Write Error at offset 4ec */
3657 if (!write_eeprom(sp, 0x4EC, 0, 3))
3658 fail = 1;
3659
3660 *data = fail;
3661 return 0;
3662}
3663
3664/**
3665 * s2io_bist_test - invokes the MemBist test of the card.
3666 * @sp : private member of the device structure, which is a pointer to the
3667 * s2io_nic structure.
3668 * @data : variable that returns the result of each of the test conducted by
3669 * the driver.
3670 * Description:
3671 * This invokes the MemBist test of the card. We give the test around
3672 * 2 seconds to complete. If it is still not complete
3673 * within this period, we consider that the test failed.
3674 * Return value:
3675 * 0 on success and -1 on failure.
3676 */
3677
3678static int s2io_bist_test(nic_t * sp, uint64_t * data)
3679{
3680 u8 bist = 0;
3681 int cnt = 0, ret = -1;
3682
3683 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3684 bist |= PCI_BIST_START;
3685	pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3686
3687 while (cnt < 20) {
3688 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3689 if (!(bist & PCI_BIST_START)) {
3690 *data = (bist & PCI_BIST_CODE_MASK);
3691 ret = 0;
3692 break;
3693 }
3694 msleep(100);
3695 cnt++;
3696 }
3697
3698 return ret;
3699}
3700
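/*
 * Illustrative userspace sketch, not part of the driver: the BIST handshake
 * above goes through the standard PCI BIST register at config offset 0x0f
 * (PCI_BIST).  The same byte can be inspected from userspace via the sysfs
 * config file; the PCI address below is a made-up example.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int main(void)
 *	{
 *		const char *cfg = "/sys/bus/pci/devices/0000:02:00.0/config";
 *		uint8_t bist;
 *		FILE *f = fopen(cfg, "rb");
 *
 *		if (!f || fseek(f, 0x0f, SEEK_SET) || fread(&bist, 1, 1, f) != 1)
 *			return 1;
 *		fclose(f);
 *		// 0x80 = BIST capable, 0x40 = test running, low nibble = result code
 *		printf("BIST capable=%d running=%d code=0x%x\n",
 *		       !!(bist & 0x80), !!(bist & 0x40), bist & 0x0f);
 *		return 0;
 *	}
 */
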
3701/**
3702 * s2io_link_test - verifies the link state of the nic
3703 * @sp : private member of the device structure, which is a pointer to the
3704 * s2io_nic structure.
3705 * @data: variable that returns the result of each of the test conducted by
3706 * the driver.
3707 * Description:
3708 * The function verifies the link state of the NIC and updates the input
3709 * argument 'data' appropriately.
3710 * Return value:
3711 * 0 on success.
3712 */
3713
3714static int s2io_link_test(nic_t * sp, uint64_t * data)
3715{
3716 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3717 u64 val64;
3718
3719 val64 = readq(&bar0->adapter_status);
3720 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3721 *data = 1;
3722
3723 return 0;
3724}
3725
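/*
 * Illustrative userspace sketch, not part of the driver: the link state that
 * s2io_link_test reports is also reflected in the netdev carrier flag, so it
 * can be sampled from userspace through sysfs while the interface is
 * administratively up.  The interface name "eth0" is an assumption.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/net/eth0/carrier", "r");
 *		int carrier;
 *
 *		if (!f || fscanf(f, "%d", &carrier) != 1)
 *			return 1;
 *		fclose(f);
 *		printf("link %s\n", carrier ? "up" : "down");
 *		return 0;
 *	}
 */
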
3726/**
3727 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3728 * @sp : private member of the device structure, which is a pointer to the
3729 * s2io_nic structure.
3730 * @data : variable that returns the result of each of the test
3731 * conducted by the driver.
3732 * Description:
3733 * This is one of the offline tests; it verifies the read and write
3734 * access to the RldRam chip on the NIC.
3735 * Return value:
3736 * 0 on success.
3737 */
3738
3739static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3740{
3741 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3742 u64 val64;
3743 int cnt, iteration = 0, test_pass = 0;
3744
3745 val64 = readq(&bar0->adapter_control);
3746 val64 &= ~ADAPTER_ECC_EN;
3747 writeq(val64, &bar0->adapter_control);
3748
3749 val64 = readq(&bar0->mc_rldram_test_ctrl);
3750 val64 |= MC_RLDRAM_TEST_MODE;
3751 writeq(val64, &bar0->mc_rldram_test_ctrl);
3752
3753 val64 = readq(&bar0->mc_rldram_mrs);
3754 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3755 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3756
3757 val64 |= MC_RLDRAM_MRS_ENABLE;
3758 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3759
3760 while (iteration < 2) {
3761 val64 = 0x55555555aaaa0000ULL;
3762 if (iteration == 1) {
3763 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3764 }
3765 writeq(val64, &bar0->mc_rldram_test_d0);
3766
3767 val64 = 0xaaaa5a5555550000ULL;
3768 if (iteration == 1) {
3769 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3770 }
3771 writeq(val64, &bar0->mc_rldram_test_d1);
3772
3773 val64 = 0x55aaaaaaaa5a0000ULL;
3774 if (iteration == 1) {
3775 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3776 }
3777 writeq(val64, &bar0->mc_rldram_test_d2);
3778
3779 val64 = (u64) (0x0000003fffff0000ULL);
3780 writeq(val64, &bar0->mc_rldram_test_add);
3781
3782
3783 val64 = MC_RLDRAM_TEST_MODE;
3784 writeq(val64, &bar0->mc_rldram_test_ctrl);
3785
3786 val64 |=
3787 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3788 MC_RLDRAM_TEST_GO;
3789 writeq(val64, &bar0->mc_rldram_test_ctrl);
3790
3791 for (cnt = 0; cnt < 5; cnt++) {
3792 val64 = readq(&bar0->mc_rldram_test_ctrl);
3793 if (val64 & MC_RLDRAM_TEST_DONE)
3794 break;
3795 msleep(200);
3796 }
3797
3798 if (cnt == 5)
3799 break;
3800
3801 val64 = MC_RLDRAM_TEST_MODE;
3802 writeq(val64, &bar0->mc_rldram_test_ctrl);
3803
3804 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3805 writeq(val64, &bar0->mc_rldram_test_ctrl);
3806
3807 for (cnt = 0; cnt < 5; cnt++) {
3808 val64 = readq(&bar0->mc_rldram_test_ctrl);
3809 if (val64 & MC_RLDRAM_TEST_DONE)
3810 break;
3811 msleep(500);
3812 }
3813
3814 if (cnt == 5)
3815 break;
3816
3817 val64 = readq(&bar0->mc_rldram_test_ctrl);
3818 if (val64 & MC_RLDRAM_TEST_PASS)
3819 test_pass = 1;
3820
3821 iteration++;
3822 }
3823
3824 if (!test_pass)
3825 *data = 1;
3826 else
3827 *data = 0;
3828
3829 return 0;
3830}
3831
3832/**
3833 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
3834 * @sp : private member of the device structure, which is a pointer to the
3835 * s2io_nic structure.
3836 * @ethtest : pointer to an ethtool command specific structure that will be
3837 * returned to the user.
3838 * @data : variable that returns the result of each of the tests
3839 * conducted by the driver.
3840 * Description:
3841 * This function conducts 5 tests (4 offline and 1 online) to determine
3842 * the health of the card.
3843 * Return value:
3844 * void
3845 */
3846
3847static void s2io_ethtool_test(struct net_device *dev,
3848 struct ethtool_test *ethtest,
3849 uint64_t * data)
3850{
3851 nic_t *sp = dev->priv;
3852 int orig_state = netif_running(sp->dev);
3853
3854 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3855 /* Offline Tests. */
3856 if (orig_state) {
3857 s2io_close(sp->dev);
3858 s2io_set_swapper(sp);
3859 } else
3860 s2io_set_swapper(sp);
3861
3862 if (s2io_register_test(sp, &data[0]))
3863 ethtest->flags |= ETH_TEST_FL_FAILED;
3864
3865 s2io_reset(sp);
3866 s2io_set_swapper(sp);
3867
3868 if (s2io_rldram_test(sp, &data[3]))
3869 ethtest->flags |= ETH_TEST_FL_FAILED;
3870
3871 s2io_reset(sp);
3872 s2io_set_swapper(sp);
3873
3874 if (s2io_eeprom_test(sp, &data[1]))
3875 ethtest->flags |= ETH_TEST_FL_FAILED;
3876
3877 if (s2io_bist_test(sp, &data[4]))
3878 ethtest->flags |= ETH_TEST_FL_FAILED;
3879
3880 if (orig_state)
3881 s2io_open(sp->dev);
3882
3883 data[2] = 0;
3884 } else {
3885 /* Online Tests. */
3886 if (!orig_state) {
3887 DBG_PRINT(ERR_DBG,
3888 "%s: is not up, cannot run test\n",
3889 dev->name);
3890 data[0] = -1;
3891 data[1] = -1;
3892 data[2] = -1;
3893 data[3] = -1;
3894 data[4] = -1;
3895 }
3896
3897 if (s2io_link_test(sp, &data[2]))
3898 ethtest->flags |= ETH_TEST_FL_FAILED;
3899
3900 data[0] = 0;
3901 data[1] = 0;
3902 data[3] = 0;
3903 data[4] = 0;
3904 }
3905}
3906
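/*
 * Illustrative userspace sketch, not part of the driver: the self-test entry
 * point above is reached through the ETHTOOL_TEST command of the SIOCETHTOOL
 * ioctl (what "ethtool -t <iface> offline" ultimately issues).  The interface
 * name "eth0" is an assumption; the result count comes from ETHTOOL_GDRVINFO.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct ethtool_drvinfo drv = { .cmd = ETHTOOL_GDRVINFO };
 *		struct ethtool_test *test;
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		unsigned int i;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&drv;
 *		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
 *			return 1;
 *
 *		test = calloc(1, sizeof(*test) +
 *			      drv.testinfo_len * sizeof(test->data[0]));
 *		if (!test)
 *			return 1;
 *		test->cmd = ETHTOOL_TEST;
 *		test->flags = ETH_TEST_FL_OFFLINE;	// also run the offline set
 *		ifr.ifr_data = (char *)test;
 *		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
 *			return 1;
 *
 *		printf("%s\n", (test->flags & ETH_TEST_FL_FAILED) ?
 *		       "FAILED" : "PASSED");
 *		for (i = 0; i < test->len; i++)
 *			printf("test %u: %llu\n", i,
 *			       (unsigned long long)test->data[i]);
 *		return 0;
 *	}
 */
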
3907static void s2io_get_ethtool_stats(struct net_device *dev,
3908 struct ethtool_stats *estats,
3909 u64 * tmp_stats)
3910{
3911 int i = 0;
3912 nic_t *sp = dev->priv;
3913 StatInfo_t *stat_info = sp->mac_control.stats_info;
3914
3915 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
3916 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
3917 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3918 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
3919 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
3920 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3921 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
3922 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3923 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
3924 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
3925 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
3926 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
3927 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3928 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
3929 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
3930 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
3931 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3932 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3933 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
3934 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3935 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3936 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3937 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
3939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
3940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
3941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
3942 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
3943 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
3944 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3945 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3946 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
3947 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
3948 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3949 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
3950 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
3951 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
3952 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
3953 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
3954}
3955
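/*
 * Illustrative userspace sketch, not part of the driver: the table filled in
 * above is what "ethtool -S <iface>" prints.  It is fetched with the
 * ETHTOOL_GSTATS command, sized by the n_stats field of ETHTOOL_GDRVINFO;
 * the interface name is an assumption.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	int main(void)
 *	{
 *		struct ethtool_drvinfo drv = { .cmd = ETHTOOL_GDRVINFO };
 *		struct ethtool_stats *stats;
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		unsigned int i;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&drv;
 *		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
 *			return 1;
 *
 *		stats = calloc(1, sizeof(*stats) +
 *			       drv.n_stats * sizeof(stats->data[0]));
 *		if (!stats)
 *			return 1;
 *		stats->cmd = ETHTOOL_GSTATS;
 *		stats->n_stats = drv.n_stats;
 *		ifr.ifr_data = (char *)stats;
 *		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
 *			return 1;
 *
 *		for (i = 0; i < stats->n_stats; i++)
 *			printf("stat[%u] = %llu\n", i,
 *			       (unsigned long long)stats->data[i]);
 *		return 0;
 *	}
 */
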
3956static int s2io_ethtool_get_regs_len(struct net_device *dev)
3957{
3958 return (XENA_REG_SPACE);
3959}
3960
3961
3962static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3963{
3964 nic_t *sp = dev->priv;
3965
3966 return (sp->rx_csum);
3967}
3968
3969static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3970{
3971 nic_t *sp = dev->priv;
3972
3973 if (data)
3974 sp->rx_csum = 1;
3975 else
3976 sp->rx_csum = 0;
3977
3978 return 0;
3979}
3980
3981static int s2io_get_eeprom_len(struct net_device *dev)
3982{
3983 return (XENA_EEPROM_SPACE);
3984}
3985
3986static int s2io_ethtool_self_test_count(struct net_device *dev)
3987{
3988 return (S2IO_TEST_LEN);
3989}
3990
3991static void s2io_ethtool_get_strings(struct net_device *dev,
3992 u32 stringset, u8 * data)
3993{
3994 switch (stringset) {
3995 case ETH_SS_TEST:
3996 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
3997 break;
3998 case ETH_SS_STATS:
3999 memcpy(data, &ethtool_stats_keys,
4000 sizeof(ethtool_stats_keys));
4001 }
4002}
4003
4004static int s2io_ethtool_get_stats_count(struct net_device *dev)
4005{
4006 return (S2IO_STAT_LEN);
4007}
4008
4009static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4010{
4011 if (data)
4012 dev->features |= NETIF_F_IP_CSUM;
4013 else
4014 dev->features &= ~NETIF_F_IP_CSUM;
4015
4016 return 0;
4017}
4018
4019
4020static struct ethtool_ops netdev_ethtool_ops = {
4021 .get_settings = s2io_ethtool_gset,
4022 .set_settings = s2io_ethtool_sset,
4023 .get_drvinfo = s2io_ethtool_gdrvinfo,
4024 .get_regs_len = s2io_ethtool_get_regs_len,
4025 .get_regs = s2io_ethtool_gregs,
4026 .get_link = ethtool_op_get_link,
4027 .get_eeprom_len = s2io_get_eeprom_len,
4028 .get_eeprom = s2io_ethtool_geeprom,
4029 .set_eeprom = s2io_ethtool_seeprom,
4030 .get_pauseparam = s2io_ethtool_getpause_data,
4031 .set_pauseparam = s2io_ethtool_setpause_data,
4032 .get_rx_csum = s2io_ethtool_get_rx_csum,
4033 .set_rx_csum = s2io_ethtool_set_rx_csum,
4034 .get_tx_csum = ethtool_op_get_tx_csum,
4035 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4036 .get_sg = ethtool_op_get_sg,
4037 .set_sg = ethtool_op_set_sg,
4038#ifdef NETIF_F_TSO
4039 .get_tso = ethtool_op_get_tso,
4040 .set_tso = ethtool_op_set_tso,
4041#endif
4042 .self_test_count = s2io_ethtool_self_test_count,
4043 .self_test = s2io_ethtool_test,
4044 .get_strings = s2io_ethtool_get_strings,
4045 .phys_id = s2io_ethtool_idnic,
4046 .get_stats_count = s2io_ethtool_get_stats_count,
4047 .get_ethtool_stats = s2io_get_ethtool_stats
4048};
4049
4050/**
4051 * s2io_ioctl - Entry point for the Ioctl
4052 * @dev : Device pointer.
4053 * @ifr : An IOCTL-specific structure that can contain a pointer to
4054 * a proprietary structure used to pass information to the driver.
4055 * @cmd : This is used to distinguish between the different commands that
4056 * can be passed to the IOCTL functions.
4057 * Description:
4058 * This function has support for ethtool, adding multiple MAC addresses on
4059 * the NIC and some DBG commands for the util tool.
4060 * Return value:
4061 * Currently the IOCTL supports no operations, hence by default this
4062 * function returns -EOPNOTSUPP.
4063 */
4064
4065static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4066{
4067 return -EOPNOTSUPP;
4068}
4069
4070/**
4071 * s2io_change_mtu - entry point to change MTU size for the device.
4072 * @dev : device pointer.
4073 * @new_mtu : the new MTU size for the device.
4074 * Description: A driver entry point to change MTU size for the device.
4075 * Before changing the MTU the device must be stopped.
4076 * Return value:
4077 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4078 * file on failure.
4079 */
4080
4081static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4082{
4083 nic_t *sp = dev->priv;
4084 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4085 register u64 val64;
4086
4087 if (netif_running(dev)) {
4088 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4089 DBG_PRINT(ERR_DBG, "change its MTU \n");
4090 return -EBUSY;
4091 }
4092
4093 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4094 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4095 dev->name);
4096 return -EPERM;
4097 }
4098
4099 /* Set the new MTU into the PYLD register of the NIC */
4100 val64 = new_mtu;
4101 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4102
4103 dev->mtu = new_mtu;
4104
4105 return 0;
4106}
4107
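/*
 * Illustrative userspace sketch, not part of the driver: the MTU change above
 * is driven by the SIOCSIFMTU ioctl (what "ifconfig eth0 mtu 9600" issues).
 * Note that this driver requires the interface to be down first, otherwise it
 * returns -EBUSY.  The interface name and MTU value are assumptions.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_mtu = 9600;	// must lie between MIN_MTU and S2IO_JUMBO_SIZE
 *		if (fd < 0 || ioctl(fd, SIOCSIFMTU, &ifr) < 0) {
 *			perror("SIOCSIFMTU");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */
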
4108/**
4109 * s2io_tasklet - Bottom half of the ISR.
4110 * @dev_addr : address of the net device structure, passed as an unsigned long.
4111 * Description:
4112 * This is the tasklet or the bottom half of the ISR. This is
4113 * an extension of the ISR which is scheduled by the scheduler to be run
4114 * when the load on the CPU is low. All low priority tasks of the ISR can
4115 * be pushed into the tasklet. For now the tasklet is used only to
4116 * replenish the Rx buffers in the Rx buffer descriptors.
4117 * Return value:
4118 * void.
4119 */
4120
4121static void s2io_tasklet(unsigned long dev_addr)
4122{
4123 struct net_device *dev = (struct net_device *) dev_addr;
4124 nic_t *sp = dev->priv;
4125 int i, ret;
4126 mac_info_t *mac_control;
4127 struct config_param *config;
4128
4129 mac_control = &sp->mac_control;
4130 config = &sp->config;
4131
4132 if (!TASKLET_IN_USE) {
4133 for (i = 0; i < config->rx_ring_num; i++) {
4134 ret = fill_rx_buffers(sp, i);
4135 if (ret == -ENOMEM) {
4136 DBG_PRINT(ERR_DBG, "%s: Out of ",
4137 dev->name);
4138 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4139 break;
4140 } else if (ret == -EFILL) {
4141 DBG_PRINT(ERR_DBG,
4142 "%s: Rx Ring %d is full\n",
4143 dev->name, i);
4144 break;
4145 }
4146 }
4147 clear_bit(0, (&sp->tasklet_status));
4148 }
4149}
4150
4151/**
4152 * s2io_set_link - Set the Link status
4153 * @data: long pointer to device private structure
4154 * Description: Sets the link status for the adapter
4155 */
4156
4157static void s2io_set_link(unsigned long data)
4158{
4159 nic_t *nic = (nic_t *) data;
4160 struct net_device *dev = nic->dev;
4161 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4162 register u64 val64;
4163 u16 subid;
4164
4165 if (test_and_set_bit(0, &(nic->link_state))) {
4166 /* The card is being reset, no point doing anything */
4167 return;
4168 }
4169
4170 subid = nic->pdev->subsystem_device;
4171 /*
4172	 * Allow a small delay for the NIC's self-initiated
4173 * cleanup to complete.
4174 */
4175 msleep(100);
4176
4177 val64 = readq(&bar0->adapter_status);
4178 if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
4179 if (LINK_IS_UP(val64)) {
4180 val64 = readq(&bar0->adapter_control);
4181 val64 |= ADAPTER_CNTL_EN;
4182 writeq(val64, &bar0->adapter_control);
4183 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4184 val64 = readq(&bar0->gpio_control);
4185 val64 |= GPIO_CTRL_GPIO_0;
4186 writeq(val64, &bar0->gpio_control);
4187 val64 = readq(&bar0->gpio_control);
4188 } else {
4189 val64 |= ADAPTER_LED_ON;
4190 writeq(val64, &bar0->adapter_control);
4191 }
4192 val64 = readq(&bar0->adapter_status);
4193 if (!LINK_IS_UP(val64)) {
4194				DBG_PRINT(ERR_DBG, "%s: Link down after "
4195					  "enabling device\n", dev->name);
4199 }
4200 if (nic->device_enabled_once == FALSE) {
4201 nic->device_enabled_once = TRUE;
4202 }
4203 s2io_link(nic, LINK_UP);
4204 } else {
4205 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4206 val64 = readq(&bar0->gpio_control);
4207 val64 &= ~GPIO_CTRL_GPIO_0;
4208 writeq(val64, &bar0->gpio_control);
4209 val64 = readq(&bar0->gpio_control);
4210 }
4211 s2io_link(nic, LINK_DOWN);
4212 }
4213 } else { /* NIC is not Quiescent. */
4214 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4215 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4216 netif_stop_queue(dev);
4217 }
4218 clear_bit(0, &(nic->link_state));
4219}
4220
4221static void s2io_card_down(nic_t * sp)
4222{
4223 int cnt = 0;
4224 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4225 unsigned long flags;
4226 register u64 val64 = 0;
4227
4228 /* If s2io_set_link task is executing, wait till it completes. */
4229 while (test_and_set_bit(0, &(sp->link_state)))
4230 msleep(50);
4231 atomic_set(&sp->card_state, CARD_DOWN);
4232
4233 /* disable Tx and Rx traffic on the NIC */
4234 stop_nic(sp);
4235
4236 /* Kill tasklet. */
4237 tasklet_kill(&sp->task);
4238
4239 /* Check if the device is Quiescent and then Reset the NIC */
4240 do {
4241 val64 = readq(&bar0->adapter_status);
4242 if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
4243 break;
4244 }
4245
4246 msleep(50);
4247 cnt++;
4248 if (cnt == 10) {
4249 DBG_PRINT(ERR_DBG,
4250				  "s2io_close: Device not Quiescent ");
4251			DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4252 (unsigned long long) val64);
4253 break;
4254 }
4255 } while (1);
4256 spin_lock_irqsave(&sp->tx_lock, flags);
4257 s2io_reset(sp);
4258
4259 /* Free all unused Tx and Rx buffers */
4260 free_tx_buffers(sp);
4261 free_rx_buffers(sp);
4262
4263 spin_unlock_irqrestore(&sp->tx_lock, flags);
4264 clear_bit(0, &(sp->link_state));
4265}
4266
4267static int s2io_card_up(nic_t * sp)
4268{
4269 int i, ret;
4270 mac_info_t *mac_control;
4271 struct config_param *config;
4272 struct net_device *dev = (struct net_device *) sp->dev;
4273
4274 /* Initialize the H/W I/O registers */
4275 if (init_nic(sp) != 0) {
4276 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4277 dev->name);
4278 return -ENODEV;
4279 }
4280
4281 /*
4282 * Initializing the Rx buffers. For now we are considering only 1
4283 * Rx ring and initializing buffers into 30 Rx blocks
4284 */
4285 mac_control = &sp->mac_control;
4286 config = &sp->config;
4287
4288 for (i = 0; i < config->rx_ring_num; i++) {
4289 if ((ret = fill_rx_buffers(sp, i))) {
4290 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4291 dev->name);
4292 s2io_reset(sp);
4293 free_rx_buffers(sp);
4294 return -ENOMEM;
4295 }
4296 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4297 atomic_read(&sp->rx_bufs_left[i]));
4298 }
4299
4300 /* Setting its receive mode */
4301 s2io_set_multicast(dev);
4302
4303 /* Enable tasklet for the device */
4304 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4305
4306 /* Enable Rx Traffic and interrupts on the NIC */
4307 if (start_nic(sp)) {
4308 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4309 tasklet_kill(&sp->task);
4310 s2io_reset(sp);
4311 free_irq(dev->irq, dev);
4312 free_rx_buffers(sp);
4313 return -ENODEV;
4314 }
4315
4316 atomic_set(&sp->card_state, CARD_UP);
4317 return 0;
4318}
4319
4320/**
4321 * s2io_restart_nic - Resets the NIC.
4322 * @data : long pointer to the device private structure
4323 * Description:
4324 * This function is scheduled to be run by the s2io_tx_watchdog
4325 * function after 0.5 secs to reset the NIC. The idea is to reduce
4326 * the run time of the watch dog routine which is run holding a
4327 * spin lock.
4328 */
4329
4330static void s2io_restart_nic(unsigned long data)
4331{
4332 struct net_device *dev = (struct net_device *) data;
4333 nic_t *sp = dev->priv;
4334
4335 s2io_card_down(sp);
4336 if (s2io_card_up(sp)) {
4337 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4338 dev->name);
4339 }
4340 netif_wake_queue(dev);
4341 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4342 dev->name);
4343}
4344
4345/**
4346 * s2io_tx_watchdog - Watchdog for transmit side.
4347 * @dev : Pointer to net device structure
4348 * Description:
4349 * This function is triggered if the Tx Queue is stopped
4350 * for a pre-defined amount of time when the Interface is still up.
4351 * If the Interface is jammed in such a situation, the hardware is
4352 * reset (by s2io_card_down) and restarted again (by s2io_card_up) to
4353 * overcome any problem that might have been caused in the hardware.
4354 * Return value:
4355 * void
4356 */
4357
4358static void s2io_tx_watchdog(struct net_device *dev)
4359{
4360 nic_t *sp = dev->priv;
4361
4362 if (netif_carrier_ok(dev)) {
4363 schedule_work(&sp->rst_timer_task);
4364 }
4365}
4366
4367/**
4368 * rx_osm_handler - To perform some OS related operations on SKB.
4369 * @sp: private member of the device structure, pointer to s2io_nic structure.
4370 * @skb : the socket buffer pointer.
4371 * @len : length of the packet
4372 * @cksum : FCS checksum of the frame.
4373 * @ring_no : the ring from which this RxD was extracted.
4374 * Description:
4375 * This function is called by the Rx interrupt service routine to perform
4376 * some OS related operations on the SKB before passing it to the upper
4377 * layers. If the hardware has verified the L3/L4 checksums of the frame,
4378 * the SKB is marked CHECKSUM_UNNECESSARY; otherwise it is marked
4379 * CHECKSUM_NONE so the upper layers verify it. The Rx packet and byte
4380 * counts are updated and the SKB is then passed up the stack.
4381 * Return value:
4382 * SUCCESS on success and -1 on failure.
4383 */
4384#ifndef CONFIG_2BUFF_MODE
4385static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
4386#else
4387static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4388 buffAdd_t * ba)
4389#endif
4390{
4391 struct net_device *dev = (struct net_device *) sp->dev;
4392 struct sk_buff *skb =
4393 (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
4394 u16 l3_csum, l4_csum;
4395#ifdef CONFIG_2BUFF_MODE
4396 int buf0_len, buf2_len;
4397 unsigned char *buff;
4398#endif
4399
4400 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4401 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
4402 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4403 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4404 /*
4405 * NIC verifies if the Checksum of the received
4406 * frame is Ok or not and accordingly returns
4407 * a flag in the RxD.
4408 */
4409 skb->ip_summed = CHECKSUM_UNNECESSARY;
4410 } else {
4411 /*
4412 * Packet with erroneous checksum, let the
4413 * upper layers deal with it.
4414 */
4415 skb->ip_summed = CHECKSUM_NONE;
4416 }
4417 } else {
4418 skb->ip_summed = CHECKSUM_NONE;
4419 }
4420
4421 if (rxdp->Control_1 & RXD_T_CODE) {
4422 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4423 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4424 dev->name, err);
4425 }
4426#ifdef CONFIG_2BUFF_MODE
4427 buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4428 buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4429#endif
4430
4431 skb->dev = dev;
4432#ifndef CONFIG_2BUFF_MODE
4433 skb_put(skb, len);
4434 skb->protocol = eth_type_trans(skb, dev);
4435#else
4436 buff = skb_push(skb, buf0_len);
4437 memcpy(buff, ba->ba_0, buf0_len);
4438 skb_put(skb, buf2_len);
4439 skb->protocol = eth_type_trans(skb, dev);
4440#endif
4441
4442#ifdef CONFIG_S2IO_NAPI
4443 netif_receive_skb(skb);
4444#else
4445 netif_rx(skb);
4446#endif
4447
4448 dev->last_rx = jiffies;
4449 sp->rx_pkt_count++;
4450 sp->stats.rx_packets++;
4451#ifndef CONFIG_2BUFF_MODE
4452 sp->stats.rx_bytes += len;
4453#else
4454 sp->stats.rx_bytes += buf0_len + buf2_len;
4455#endif
4456
4457 atomic_dec(&sp->rx_bufs_left[ring_no]);
4458 rxdp->Host_Control = 0;
4459 return SUCCESS;
4460}
4461
4462/**
4463 * s2io_link - stops/starts the Tx queue.
4464 * @sp : private member of the device structure, which is a pointer to the
4465 * s2io_nic structure.
4466 * @link : indicates whether link is UP/DOWN.
4467 * Description:
4468 * This function stops/starts the Tx queue depending on whether the link
4469 * status of the NIC is down or up. This is called by the Alarm
4470 * interrupt handler whenever a link change interrupt comes up.
4471 * Return value:
4472 * void.
4473 */
4474
4475static void s2io_link(nic_t * sp, int link)
4476{
4477 struct net_device *dev = (struct net_device *) sp->dev;
4478
4479 if (link != sp->last_link_state) {
4480 if (link == LINK_DOWN) {
4481 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4482 netif_carrier_off(dev);
4483 } else {
4484 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4485 netif_carrier_on(dev);
4486 }
4487 }
4488 sp->last_link_state = link;
4489}
4490
4491/**
4492 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
4493 * @sp : private member of the device structure, which is a pointer to the
4494 * s2io_nic structure.
4495 * Description:
4496 * This function initializes a few of the PCI and PCI-X configuration registers
4497 * with recommended values.
4498 * Return value:
4499 * void
4500 */
4501
4502static void s2io_init_pci(nic_t * sp)
4503{
4504 u16 pci_cmd = 0;
4505
4506 /* Enable Data Parity Error Recovery in PCI-X command register. */
4507 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4508 &(sp->pcix_cmd));
4509 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4510 (sp->pcix_cmd | 1));
4511 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4512 &(sp->pcix_cmd));
4513
4514 /* Set the PErr Response bit in PCI command register. */
4515 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4516 pci_write_config_word(sp->pdev, PCI_COMMAND,
4517 (pci_cmd | PCI_COMMAND_PARITY));
4518 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4519
4520 /* Set MMRB count to 1024 in PCI-X Command register. */
4521 sp->pcix_cmd &= 0xFFF3;
4522 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4523 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4524 &(sp->pcix_cmd));
4525
4526 /* Setting Maximum outstanding splits based on system type. */
4527 sp->pcix_cmd &= 0xFF8F;
4528
4529 sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4530 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4531 sp->pcix_cmd);
4532 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4533 &(sp->pcix_cmd));
4534 /* Forcibly disabling relaxed ordering capability of the card. */
4535 sp->pcix_cmd &= 0xfffd;
4536 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4537 sp->pcix_cmd);
4538 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4539 &(sp->pcix_cmd));
4540}
4541
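/*
 * Illustrative userspace sketch, not part of the driver: the fields updated
 * above live in the PCI-X command register (offset 2 into the PCI-X
 * capability): bit 0 enables data parity error recovery, bit 1 relaxed
 * ordering, bits 3:2 encode MMRBC (512 << code bytes) and bits 6:4 the
 * maximum outstanding splits.  They can be read back through the sysfs
 * config file; the PCI address below is a made-up example.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	static int cfg_read(FILE *f, long off, uint8_t *buf, size_t len)
 *	{
 *		return fseek(f, off, SEEK_SET) == 0 && fread(buf, 1, len, f) == len;
 *	}
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/bus/pci/devices/0000:02:00.0/config", "rb");
 *		uint8_t pos, id, b[2];
 *		uint16_t cmd;
 *
 *		if (!f || !cfg_read(f, 0x34, &pos, 1))	// capabilities pointer
 *			return 1;
 *		while (pos) {
 *			if (!cfg_read(f, pos, &id, 1))
 *				return 1;
 *			if (id == 0x07)			// PCI_CAP_ID_PCIX
 *				break;
 *			if (!cfg_read(f, pos + 1, &pos, 1))	// next capability
 *				return 1;
 *		}
 *		if (!pos || !cfg_read(f, pos + 2, b, 2))	// PCI-X command register
 *			return 1;
 *		cmd = b[0] | (b[1] << 8);
 *		printf("MMRBC=%u bytes, max splits code=%u, DPERE=%u, RO=%u\n",
 *		       512u << ((cmd >> 2) & 0x3), (cmd >> 4) & 0x7,
 *		       cmd & 0x1, (cmd >> 1) & 0x1);
 *		return 0;
 *	}
 */
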
4542MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4543MODULE_LICENSE("GPL");
4544module_param(tx_fifo_num, int, 0);
4545module_param_array(tx_fifo_len, int, NULL, 0);
4546module_param(rx_ring_num, int, 0);
4547module_param_array(rx_ring_sz, int, NULL, 0);
4548module_param(Stats_refresh_time, int, 0);
4549module_param(rmac_pause_time, int, 0);
4550module_param(mc_pause_threshold_q0q3, int, 0);
4551module_param(mc_pause_threshold_q4q7, int, 0);
4552module_param(shared_splits, int, 0);
4553module_param(tmac_util_period, int, 0);
4554module_param(rmac_util_period, int, 0);
4555#ifndef CONFIG_S2IO_NAPI
4556module_param(indicate_max_pkts, int, 0);
4557#endif
4558/**
4559 * s2io_init_nic - Initialization of the adapter .
4560 * @pdev : structure containing the PCI related information of the device.
4561 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4562 * Description:
4563 * The function initializes an adapter identified by the pci_dev structure.
4564 * All OS related initialization including memory and device structure and
4565 * initialization of the device private variable is done. Also the swapper
4566 * control register is initialized to enable read and write into the I/O
4567 * registers of the device.
4568 * Return value:
4569 * returns 0 on success and negative on failure.
4570 */
4571
4572static int __devinit
4573s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4574{
4575 nic_t *sp;
4576 struct net_device *dev;
4577 char *dev_name = "S2IO 10GE NIC";
4578 int i, j, ret;
4579 int dma_flag = FALSE;
4580 u32 mac_up, mac_down;
4581 u64 val64 = 0, tmp64 = 0;
4582 XENA_dev_config_t __iomem *bar0 = NULL;
4583 u16 subid;
4584 mac_info_t *mac_control;
4585 struct config_param *config;
4586
4587
4588 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
4589 s2io_driver_version);
4590
4591 if ((ret = pci_enable_device(pdev))) {
4592 DBG_PRINT(ERR_DBG,
4593 "s2io_init_nic: pci_enable_device failed\n");
4594 return ret;
4595 }
4596
4597	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4598		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4599		dma_flag = TRUE;
4600
4601		if (pci_set_consistent_dma_mask
4602		    (pdev, DMA_64BIT_MASK)) {
4603			DBG_PRINT(ERR_DBG,
4604				  "Unable to obtain 64bit DMA "
4605				  "for consistent allocations\n");
4606			pci_disable_device(pdev);
4607			return -ENOMEM;
4608		}
4609	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4610		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4611	} else {
4612		pci_disable_device(pdev);
4613		return -ENOMEM;
4614	}
4615
4616 if (pci_request_regions(pdev, s2io_driver_name)) {
4617		DBG_PRINT(ERR_DBG, "Request Regions failed\n");
4618 pci_disable_device(pdev);
4619 return -ENODEV;
4620 }
4621
4622 dev = alloc_etherdev(sizeof(nic_t));
4623 if (dev == NULL) {
4624 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4625 pci_disable_device(pdev);
4626 pci_release_regions(pdev);
4627 return -ENODEV;
4628 }
4629
4630 pci_set_master(pdev);
4631 pci_set_drvdata(pdev, dev);
4632 SET_MODULE_OWNER(dev);
4633 SET_NETDEV_DEV(dev, &pdev->dev);
4634
4635 /* Private member variable initialized to s2io NIC structure */
4636 sp = dev->priv;
4637 memset(sp, 0, sizeof(nic_t));
4638 sp->dev = dev;
4639 sp->pdev = pdev;
4640 sp->vendor_id = pdev->vendor;
4641 sp->device_id = pdev->device;
4642 sp->high_dma_flag = dma_flag;
4643 sp->irq = pdev->irq;
4644 sp->device_enabled_once = FALSE;
4645 strcpy(sp->name, dev_name);
4646
4647 /* Initialize some PCI/PCI-X fields of the NIC. */
4648 s2io_init_pci(sp);
4649
4650 /*
4651 * Setting the device configuration parameters.
4652 * Most of these parameters can be specified by the user during
4653 * module insertion as they are module loadable parameters. If
4654	 * these parameters are not specified during load time, they
4655 * are initialized with default values.
4656 */
4657 mac_control = &sp->mac_control;
4658 config = &sp->config;
4659
4660 /* Tx side parameters. */
4661 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4662 config->tx_fifo_num = tx_fifo_num;
4663 for (i = 0; i < MAX_TX_FIFOS; i++) {
4664 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4665 config->tx_cfg[i].fifo_priority = i;
4666 }
4667
4668 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4669 for (i = 0; i < config->tx_fifo_num; i++) {
4670 config->tx_cfg[i].f_no_snoop =
4671 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4672 if (config->tx_cfg[i].fifo_len < 65) {
4673 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4674 break;
4675 }
4676 }
4677 config->max_txds = MAX_SKB_FRAGS;
4678
4679 /* Rx side parameters. */
4680 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4681 config->rx_ring_num = rx_ring_num;
4682 for (i = 0; i < MAX_RX_RINGS; i++) {
4683 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4684 (MAX_RXDS_PER_BLOCK + 1);
4685 config->rx_cfg[i].ring_priority = i;
4686 }
4687
4688 for (i = 0; i < rx_ring_num; i++) {
4689 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4690 config->rx_cfg[i].f_no_snoop =
4691 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4692 }
4693
4694 /* Setting Mac Control parameters */
4695 mac_control->rmac_pause_time = rmac_pause_time;
4696 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4697 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4698
4699
4700 /* Initialize Ring buffer parameters. */
4701 for (i = 0; i < config->rx_ring_num; i++)
4702 atomic_set(&sp->rx_bufs_left[i], 0);
4703
4704 /* initialize the shared memory used by the NIC and the host */
4705 if (init_shared_mem(sp)) {
4706 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4707 dev->name);
4708 ret = -ENOMEM;
4709 goto mem_alloc_failed;
4710 }
4711
4712 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4713 pci_resource_len(pdev, 0));
4714 if (!sp->bar0) {
4715 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4716 dev->name);
4717 ret = -ENOMEM;
4718 goto bar0_remap_failed;
4719 }
4720
4721 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4722 pci_resource_len(pdev, 2));
4723 if (!sp->bar1) {
4724 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4725 dev->name);
4726 ret = -ENOMEM;
4727 goto bar1_remap_failed;
4728 }
4729
4730 dev->irq = pdev->irq;
4731 dev->base_addr = (unsigned long) sp->bar0;
4732
4733 /* Initializing the BAR1 address as the start of the FIFO pointer. */
4734 for (j = 0; j < MAX_TX_FIFOS; j++) {
4735 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4736 (sp->bar1 + (j * 0x00020000));
4737 }
4738
4739 /* Driver entry points */
4740 dev->open = &s2io_open;
4741 dev->stop = &s2io_close;
4742 dev->hard_start_xmit = &s2io_xmit;
4743 dev->get_stats = &s2io_get_stats;
4744 dev->set_multicast_list = &s2io_set_multicast;
4745 dev->do_ioctl = &s2io_ioctl;
4746 dev->change_mtu = &s2io_change_mtu;
4747 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4748 /*
4749 * will use eth_mac_addr() for dev->set_mac_address
4750 * mac address will be set every time dev->open() is called
4751 */
4752#ifdef CONFIG_S2IO_NAPI
4753 dev->poll = s2io_poll;
4754 dev->weight = 90;
4755#endif
4756
4757 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4758 if (sp->high_dma_flag == TRUE)
4759 dev->features |= NETIF_F_HIGHDMA;
4760#ifdef NETIF_F_TSO
4761 dev->features |= NETIF_F_TSO;
4762#endif
4763
4764 dev->tx_timeout = &s2io_tx_watchdog;
4765 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4766 INIT_WORK(&sp->rst_timer_task,
4767 (void (*)(void *)) s2io_restart_nic, dev);
4768 INIT_WORK(&sp->set_link_task,
4769 (void (*)(void *)) s2io_set_link, sp);
4770
4771 pci_save_state(sp->pdev);
4772
4773 /* Setting swapper control on the NIC, for proper reset operation */
4774 if (s2io_set_swapper(sp)) {
4775		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
4776 dev->name);
4777 ret = -EAGAIN;
4778 goto set_swap_failed;
4779 }
4780
4781 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */
4782 fix_mac_address(sp);
4783 s2io_reset(sp);
4784
4785 /*
4786 * Setting swapper control on the NIC, so the MAC address can be read.
4787 */
4788 if (s2io_set_swapper(sp)) {
4789 DBG_PRINT(ERR_DBG,
4790 "%s: S2IO: swapper settings are wrong\n",
4791 dev->name);
4792 ret = -EAGAIN;
4793 goto set_swap_failed;
4794 }
4795
4796 /*
4797 * MAC address initialization.
4798 * For now only one mac address will be read and used.
4799 */
4800 bar0 = sp->bar0;
4801 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4802 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4803 writeq(val64, &bar0->rmac_addr_cmd_mem);
4804 wait_for_cmd_complete(sp);
4805
4806 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4807 mac_down = (u32) tmp64;
4808 mac_up = (u32) (tmp64 >> 32);
4809
4810	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4811
4812 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4813 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4814 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4815 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4816 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4817 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4818
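	/*
	 * Worked example (illustrative): the factory MAC occupies the top 48
	 * bits of this register.  If rmac_addr_data0_mem read back as
	 * 0x0123456789AB0000ULL, then mac_up = 0x01234567 and
	 * mac_down = 0x89AB0000, and the assignments above yield the address
	 * 01:23:45:67:89:AB.
	 */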
4819 DBG_PRINT(INIT_DBG,
4820 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4821 sp->def_mac_addr[0].mac_addr[0],
4822 sp->def_mac_addr[0].mac_addr[1],
4823 sp->def_mac_addr[0].mac_addr[2],
4824 sp->def_mac_addr[0].mac_addr[3],
4825 sp->def_mac_addr[0].mac_addr[4],
4826 sp->def_mac_addr[0].mac_addr[5]);
4827
4828 /* Set the factory defined MAC address initially */
4829 dev->addr_len = ETH_ALEN;
4830 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4831
4832 /*
4833 * Initialize the tasklet status and link state flags
4834	 * and the card state parameter
4835 */
4836 atomic_set(&(sp->card_state), 0);
4837 sp->tasklet_status = 0;
4838 sp->link_state = 0;
4839
4840
4841 /* Initialize spinlocks */
4842 spin_lock_init(&sp->tx_lock);
4843#ifndef CONFIG_S2IO_NAPI
4844 spin_lock_init(&sp->put_lock);
4845#endif
4846
4847 /*
4848 * SXE-002: Configure link and activity LED to init state
4849 * on driver load.
4850 */
4851 subid = sp->pdev->subsystem_device;
4852 if ((subid & 0xFF) >= 0x07) {
4853 val64 = readq(&bar0->gpio_control);
4854 val64 |= 0x0000800000000000ULL;
4855 writeq(val64, &bar0->gpio_control);
4856 val64 = 0x0411040400000000ULL;
4857 writeq(val64, (void __iomem *) bar0 + 0x2700);
4858 val64 = readq(&bar0->gpio_control);
4859 }
4860
4861 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
4862
4863 if (register_netdev(dev)) {
4864 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4865 ret = -ENODEV;
4866 goto register_failed;
4867 }
4868
4869 /*
4870 * Make Link state as off at this point, when the Link change
4871 * interrupt comes the state will be automatically changed to
4872 * the right state.
4873 */
4874 netif_carrier_off(dev);
4875 sp->last_link_state = LINK_DOWN;
4876
4877 return 0;
4878
4879 register_failed:
4880 set_swap_failed:
4881 iounmap(sp->bar1);
4882 bar1_remap_failed:
4883 iounmap(sp->bar0);
4884 bar0_remap_failed:
4885 mem_alloc_failed:
4886 free_shared_mem(sp);
4887 pci_disable_device(pdev);
4888 pci_release_regions(pdev);
4889 pci_set_drvdata(pdev, NULL);
4890 free_netdev(dev);
4891
4892 return ret;
4893}
4894
4895/**
4896 * s2io_rem_nic - Free the PCI device
4897 * @pdev: structure containing the PCI related information of the device.
4898 * Description: This function is called by the Pci subsystem to release a
4899 * PCI device and free up all resource held up by the device. This could
4900 * be in response to a Hot plug event or when the driver is to be removed
4901 * from memory.
4902 */
4903
4904static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4905{
4906 struct net_device *dev =
4907 (struct net_device *) pci_get_drvdata(pdev);
4908 nic_t *sp;
4909
4910 if (dev == NULL) {
4911 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
4912 return;
4913 }
4914
4915 sp = dev->priv;
4916 unregister_netdev(dev);
4917
4918 free_shared_mem(sp);
4919 iounmap(sp->bar0);
4920 iounmap(sp->bar1);
4921 pci_disable_device(pdev);
4922 pci_release_regions(pdev);
4923 pci_set_drvdata(pdev, NULL);
4924
4925 free_netdev(dev);
4926}
4927
4928/**
4929 * s2io_starter - Entry point for the driver
4930 * Description: This function is the entry point for the driver. It registers
4931 * the driver with the PCI subsystem.
4932 */
4933
4934int __init s2io_starter(void)
4935{
4936 return pci_module_init(&s2io_driver);
4937}
4938
4939/**
4940 * s2io_closer - Cleanup routine for the driver
4941 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4942 */
4943
4944static void s2io_closer(void)
4945{
4946 pci_unregister_driver(&s2io_driver);
4947 DBG_PRINT(INIT_DBG, "cleanup done\n");
4948}
4949
4950module_init(s2io_starter);
4951module_exit(s2io_closer);