1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 /* S2io Driver name & version. */
/* Reported via ethtool/module info; bump the version string on release. */
69 static char s2io_driver_name[] = "Neterion";
70 static char s2io_driver_version[] = "Version 2.0.9.1";
/*
 * RXD_IS_UP2DT - test whether a receive descriptor has been handed back
 * to the host with a completed frame: the adapter no longer owns it
 * (RXD_OWN_XENA clear) and the software marker has been overwritten.
 * NOTE(review): the function body's braces/return are elided in this
 * listing; 'ret' is presumably declared and returned in the elided lines.
 */
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 * Arguments and the full expansions are parenthesized so the macros
 * stay safe when callers pass expressions or embed them in larger
 * expressions (the unparenthesized ternary was an operator-precedence
 * hazard).
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is considered up when neither local nor remote RMAC fault is set. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

/* Atomically claims the tasklet; non-zero means it was already in use.
 * NOTE(review): relies on an 'sp' (nic_t *) being in scope at the use site. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full ring 'ring' is given the count of
 * currently posted Rx buffers ('rxb_size').  The visible checks compare
 * against the ring's pkt_cnt and MAX_RXDS_PER_BLOCK; the return values
 * (e.g. PANIC/LOW/0) are in lines elided from this listing.
 */
97 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
100 mac_info_t *mac_control;
102 mac_control = &sp->mac_control;
/* More than 16 descriptors short of capacity: ring is getting empty. */
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
/* Down to at most one block of RxDs: critical level. */
105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
113 /* Ethtool related variables and Macros. */
/* Self-test names reported by ethtool; order must match the test
 * dispatch code elsewhere in the driver. */
114 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
/* Statistics key names for ethtool -S; order must match the order in
 * which the stat values are copied out.  NOTE(review): many entries and
 * the arrays' closing braces are elided in this listing. */
122 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
124 {"tmac_data_octets"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
150 {"rmac_jabber_frms"},
158 {"rmac_err_drp_udp"},
160 {"rmac_accepted_ip"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
/*
 * Counts of ethtool stat keys / self-test names derived from the table
 * sizes above.  Each expansion is fully parenthesized so the macros
 * compose safely inside larger expressions (the originals were bare
 * 'a / b' and 'a * b' expansions).
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * S2IO_TIMER_CONF - initialize a kernel timer with the given handler and
 * argument, then arm it to fire 'exp' jiffies from now.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside un-braced if/else bodies (the original expanded to
 * four bare statements).  Arguments are parenthesized against expression
 * callers.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&(timer));				\
		(timer).function = (handle);			\
		(timer).data = (unsigned long)(arg);		\
		mod_timer(&(timer), (jiffies + (exp)));		\
	} while (0)
/*
 * s2io_vlan_rx_register - VLAN group registration callback for the stack.
 * Takes the Tx lock while updating VLAN state (the assignment of 'grp',
 * the 'flags' declaration and the braces are elided in this listing).
 */
180 static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
183 nic_t *nic = dev->priv;
186 spin_lock_irqsave(&nic->tx_lock, flags);
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
191 /* Unregister the vlan */
/*
 * Clears the per-VID device slot under the Tx lock so the transmit path
 * never sees a stale VLAN device.  NOTE(review): no visible NULL check
 * on nic->vlgrp before dereferencing it — presumably guaranteed by the
 * caller; confirm against elided lines.
 */
192 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
194 nic_t *nic = dev->priv;
197 spin_lock_irqsave(&nic->tx_lock, flags);
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
204 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel value inside the config arrays below: when encountered, the
 * writer switches to the other target register (see dtx/mdio loops). */
208 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* XAUI/serdes configuration words written to dtx_control on Xframe-II
 * (Herc).  NOTE(review): interleaved comment lines, the END_SIGN
 * terminator and the closing brace are elided in this listing. */
211 static u64 herc_act_dtx_cfg[] = {
213 0x8000051536750000ULL, 0x80000515367500E0ULL,
215 0x8000051536750004ULL, 0x80000515367500E4ULL,
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
221 0x801205150D440000ULL, 0x801205150D4400E0ULL,
223 0x801205150D440004ULL, 0x801205150D4400E4ULL,
225 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
227 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* MDIO configuration sequence written to mdio_control on Xframe-I
 * (terminating END_SIGN and closing brace elided in this listing). */
232 static u64 xena_mdio_cfg[] = {
234 0xC001010000000000ULL, 0xC0010100000000E0ULL,
235 0xC0010100008000E4ULL,
236 /* Remove Reset from PMA PLL */
237 0xC001010000000000ULL, 0xC0010100000000E0ULL,
238 0xC0010100000000E4ULL,
/* DTX (serdes/XAUI) configuration sequence for Xframe-I; SWITCH_SIGN
 * entries (elided here) hand control over to xena_mdio_cfg writes.
 * Terminator and closing brace are elided in this listing. */
242 static u64 xena_dtx_cfg[] = {
243 0x8000051500000000ULL, 0x80000515000000E0ULL,
244 0x80000515D93500E4ULL, 0x8001051500000000ULL,
245 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
246 0x8002051500000000ULL, 0x80020515000000E0ULL,
247 0x80020515F21000E4ULL,
248 /* Set PADLOOPBACKN */
249 0x8002051500000000ULL, 0x80020515000000E0ULL,
250 0x80020515B20000E4ULL, 0x8003051500000000ULL,
251 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
252 0x8004051500000000ULL, 0x80040515000000E0ULL,
253 0x80040515B20000E4ULL, 0x8005051500000000ULL,
254 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
256 /* Remove PADLOOPBACKN */
257 0x8002051500000000ULL, 0x80020515000000E0ULL,
258 0x80020515F20000E4ULL, 0x8003051500000000ULL,
259 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
260 0x8004051500000000ULL, 0x80040515000000E0ULL,
261 0x80040515F20000E4ULL, 0x8005051500000000ULL,
262 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
267 * Constants for Fixing the MacAddress problem seen mostly on
/* Workaround write sequence for the MAC-address read problem (per the
 * partially elided comment above); terminator/closing brace elided. */
270 static u64 fix_mac[] = {
271 0x0060000000000000ULL, 0x0060600000000000ULL,
272 0x0040600000000000ULL, 0x0000600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0060600000000000ULL,
280 0x0020600000000000ULL, 0x0060600000000000ULL,
281 0x0020600000000000ULL, 0x0060600000000000ULL,
282 0x0020600000000000ULL, 0x0060600000000000ULL,
283 0x0020600000000000ULL, 0x0000600000000000ULL,
284 0x0040600000000000ULL, 0x0060600000000000ULL,
288 /* Module Loadable parameters. */
/* Per-FIFO / per-ring sizes default to 0, meaning "use driver default". */
289 static unsigned int tx_fifo_num = 1;
290 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
291 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
292 static unsigned int rx_ring_num = 1;
293 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
294 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
295 static unsigned int rts_frm_len[MAX_RX_RINGS] =
296 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297 static unsigned int use_continuous_tx_intrs = 1;
/* Pause-frame time and per-queue pause thresholds programmed into the MAC. */
298 static unsigned int rmac_pause_time = 65535;
299 static unsigned int mc_pause_threshold_q0q3 = 187;
300 static unsigned int mc_pause_threshold_q4q7 = 187;
301 static unsigned int shared_splits;
/* Utilization sampling periods (units defined by hardware registers). */
302 static unsigned int tmac_util_period = 5;
303 static unsigned int rmac_util_period = 5;
304 static unsigned int bimodal = 0;
/* Only meaningful when NAPI is compiled out; the matching #endif is in
 * lines elided from this listing. */
305 #ifndef CONFIG_S2IO_NAPI
306 static unsigned int indicate_max_pkts;
308 /* Frequency of Rx desc syncs expressed as power of 2 */
309 static unsigned int rxsync_frequency = 3;
310 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
311 static unsigned int intr_type = 0;
315 * This table lists all the devices that this driver supports.
/* PCI IDs: Xframe-I (WIN/UNI) and Xframe-II a.k.a. Herc (WIN/UNI).
 * NOTE(review): the subvendor/subdevice columns and terminating {0,}
 * entry are elided in this listing. */
317 static struct pci_device_id s2io_tbl[] __devinitdata = {
318 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
319 PCI_ANY_ID, PCI_ANY_ID},
320 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
321 PCI_ANY_ID, PCI_ANY_ID},
322 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
323 PCI_ANY_ID, PCI_ANY_ID},
324 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
325 PCI_ANY_ID, PCI_ANY_ID},
/* Exposes the ID table for module autoloading (hotplug/udev). */
329 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver glue: probe/remove entry points (the .name initializer and
 * closing brace are elided in this listing). */
331 static struct pci_driver s2io_driver = {
333 .id_table = s2io_tbl,
334 .probe = s2io_init_nic,
335 .remove = __devexit_p(s2io_rem_nic),
/*
 * TXD_MEM_PAGE_CNT - ceiling division: number of pages needed to hold
 * 'len' TxD lists when 'per_each' lists fit per page.  Used both by
 * init_shared_mem() and free_shared_mem() so allocation and freeing
 * agree on the page count.  Arguments are parenthesized so expression
 * arguments (e.g. a 'x - 1' divisor) expand correctly.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
342 * init_shared_mem - Allocation and Initialization of Memory
343 * @nic: Device private variable.
344 * Description: The function allocates all the memory areas shared
345 * between the NIC and the driver. This includes Tx descriptors,
346 * Rx descriptors and the statistics block.
/* NOTE(review): many lines of this function (braces, error returns,
 * kmalloc flags, #else/#endif branches) are elided in this listing;
 * comments below describe only the visible code. */
349 static int init_shared_mem(struct s2io_nic *nic)
352 void *tmp_v_addr, *tmp_v_addr_next;
353 dma_addr_t tmp_p_addr, tmp_p_addr_next;
354 RxD_block_t *pre_rxd_blk = NULL;
355 int i, j, blk_cnt, rx_sz, tx_sz;
356 int lst_size, lst_per_page;
357 struct net_device *dev = nic->dev;
358 #ifdef CONFIG_2BUFF_MODE
363 mac_info_t *mac_control;
364 struct config_param *config;
366 mac_control = &nic->mac_control;
367 config = &nic->config;
370 /* Allocation and initialization of TXDLs in FIOFs */
/* Sum requested TxDs across all FIFOs and reject over-large requests. */
372 for (i = 0; i < config->tx_fifo_num; i++) {
373 size += config->tx_cfg[i].fifo_len;
375 if (size > MAX_AVAILABLE_TXDS) {
376 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
378 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
/* One TxD list per descriptor slot; lists are packed into whole pages. */
382 lst_size = (sizeof(TxD_t) * config->max_txds);
383 tx_sz = lst_size * size;
384 lst_per_page = PAGE_SIZE / lst_size;
/* Per-FIFO bookkeeping array (one list_info_hold_t per descriptor). */
386 for (i = 0; i < config->tx_fifo_num; i++) {
387 int fifo_len = config->tx_cfg[i].fifo_len;
388 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
389 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
391 if (!mac_control->fifos[i].list_info) {
393 "Malloc failed for list_info\n");
396 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Initialize FIFO get/put cursors and DMA-map the TxD list pages. */
398 for (i = 0; i < config->tx_fifo_num; i++) {
399 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
401 mac_control->fifos[i].tx_curr_put_info.offset = 0;
402 mac_control->fifos[i].tx_curr_put_info.fifo_len =
403 config->tx_cfg[i].fifo_len - 1;
404 mac_control->fifos[i].tx_curr_get_info.offset = 0;
405 mac_control->fifos[i].tx_curr_get_info.fifo_len =
406 config->tx_cfg[i].fifo_len - 1;
407 mac_control->fifos[i].fifo_no = i;
408 mac_control->fifos[i].nic = nic;
409 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
411 for (j = 0; j < page_num; j++) {
415 tmp_v = pci_alloc_consistent(nic->pdev,
419 "pci_alloc_consistent ");
420 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
423 /* If we got a zero DMA address(can happen on
424 * certain platforms like PPC), reallocate.
425 * Store virtual address of page we don't want,
429 mac_control->zerodma_virt_addr = tmp_v;
431 "%s: Zero DMA address for TxDL. ", dev->name);
433 "Virtual address %p\n", tmp_v);
434 tmp_v = pci_alloc_consistent(nic->pdev,
438 "pci_alloc_consistent ");
439 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
/* Carve the page into per-descriptor list slots (virt + phys). */
443 while (k < lst_per_page) {
444 int l = (j * lst_per_page) + k;
445 if (l == config->tx_cfg[i].fifo_len)
447 mac_control->fifos[i].list_info[l].list_virt_addr =
448 tmp_v + (k * lst_size);
449 mac_control->fifos[i].list_info[l].list_phy_addr =
450 tmp_p + (k * lst_size);
456 /* Allocation and initialization of RXDs in Rings */
/* Ring sizes must be whole blocks; pkt_cnt excludes the per-block
 * descriptor consumed as the block-link entry. */
458 for (i = 0; i < config->rx_ring_num; i++) {
459 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
460 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
461 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
463 DBG_PRINT(ERR_DBG, "RxDs per Block");
466 size += config->rx_cfg[i].num_rxd;
467 mac_control->rings[i].block_count =
468 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
469 mac_control->rings[i].pkt_cnt =
470 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
472 size = (size * (sizeof(RxD_t)));
/* Per-ring cursor setup and DMA allocation of each Rx block. */
475 for (i = 0; i < config->rx_ring_num; i++) {
476 mac_control->rings[i].rx_curr_get_info.block_index = 0;
477 mac_control->rings[i].rx_curr_get_info.offset = 0;
478 mac_control->rings[i].rx_curr_get_info.ring_len =
479 config->rx_cfg[i].num_rxd - 1;
480 mac_control->rings[i].rx_curr_put_info.block_index = 0;
481 mac_control->rings[i].rx_curr_put_info.offset = 0;
482 mac_control->rings[i].rx_curr_put_info.ring_len =
483 config->rx_cfg[i].num_rxd - 1;
484 mac_control->rings[i].nic = nic;
485 mac_control->rings[i].ring_no = i;
488 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
489 /* Allocating all the Rx blocks */
490 for (j = 0; j < blk_cnt; j++) {
491 #ifndef CONFIG_2BUFF_MODE
492 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
494 size = SIZE_OF_BLOCK;
496 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
498 if (tmp_v_addr == NULL) {
500 * In case of failure, free_shared_mem()
501 * is called, which should free any
502 * memory that was alloced till the
505 mac_control->rings[i].rx_blocks[j].block_virt_addr =
509 memset(tmp_v_addr, 0, size);
510 mac_control->rings[i].rx_blocks[j].block_virt_addr =
512 mac_control->rings[i].rx_blocks[j].block_dma_addr =
515 /* Interlinking all Rx Blocks */
/* Each block's trailer points to the next block (wrapping via modulo),
 * forming a circular chain the NIC can follow. */
516 for (j = 0; j < blk_cnt; j++) {
518 mac_control->rings[i].rx_blocks[j].block_virt_addr;
520 mac_control->rings[i].rx_blocks[(j + 1) %
521 blk_cnt].block_virt_addr;
523 mac_control->rings[i].rx_blocks[j].block_dma_addr;
525 mac_control->rings[i].rx_blocks[(j + 1) %
526 blk_cnt].block_dma_addr;
528 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
529 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
532 #ifndef CONFIG_2BUFF_MODE
533 pre_rxd_blk->reserved_2_pNext_RxD_block =
534 (unsigned long) tmp_v_addr_next;
536 pre_rxd_blk->pNext_RxD_Blk_physical =
537 (u64) tmp_p_addr_next;
541 #ifdef CONFIG_2BUFF_MODE
543 * Allocation of Storages for buffer addresses in 2BUFF mode
544 * and the buffers as well.
546 for (i = 0; i < config->rx_ring_num; i++) {
548 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
549 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
551 if (!mac_control->rings[i].ba)
553 for (j = 0; j < blk_cnt; j++) {
555 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
556 (MAX_RXDS_PER_BLOCK + 1)),
558 if (!mac_control->rings[i].ba[j])
560 while (k != MAX_RXDS_PER_BLOCK) {
561 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, keep the original pointer for kfree,
 * and round up to an aligned address for the hardware buffer. */
563 ba->ba_0_org = (void *) kmalloc
564 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
567 tmp = (unsigned long) ba->ba_0_org;
569 tmp &= ~((unsigned long) ALIGN_SIZE);
570 ba->ba_0 = (void *) tmp;
572 ba->ba_1_org = (void *) kmalloc
573 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
576 tmp = (unsigned long) ba->ba_1_org;
578 tmp &= ~((unsigned long) ALIGN_SIZE);
579 ba->ba_1 = (void *) tmp;
586 /* Allocation and initialization of Statistics block */
587 size = sizeof(StatInfo_t);
588 mac_control->stats_mem = pci_alloc_consistent
589 (nic->pdev, size, &mac_control->stats_mem_phy);
591 if (!mac_control->stats_mem) {
593 * In case of failure, free_shared_mem() is called, which
594 * should free any memory that was alloced till the
599 mac_control->stats_mem_sz = size;
601 tmp_v_addr = mac_control->stats_mem;
602 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
603 memset(tmp_v_addr, 0, size);
604 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
605 (unsigned long long) tmp_p_addr);
611 * free_shared_mem - Free the allocated Memory
612 * @nic: Device private variable.
613 * Description: This function is to free all memory locations allocated by
614 * the init_shared_mem() function and return it to the kernel.
/* NOTE(review): braces, continue/break statements and some argument
 * lines are elided in this listing; must mirror init_shared_mem() so
 * every allocation path has a matching free here. */
617 static void free_shared_mem(struct s2io_nic *nic)
619 int i, j, blk_cnt, size;
621 dma_addr_t tmp_p_addr;
622 mac_info_t *mac_control;
623 struct config_param *config;
624 int lst_size, lst_per_page;
625 struct net_device *dev = nic->dev;
630 mac_control = &nic->mac_control;
631 config = &nic->config;
/* Recompute the same page layout used at allocation time. */
633 lst_size = (sizeof(TxD_t) * config->max_txds);
634 lst_per_page = PAGE_SIZE / lst_size;
636 for (i = 0; i < config->tx_fifo_num; i++) {
637 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
639 for (j = 0; j < page_num; j++) {
640 int mem_blks = (j * lst_per_page);
/* Guard against partially initialized state after an alloc failure. */
641 if (!mac_control->fifos[i].list_info)
643 if (!mac_control->fifos[i].list_info[mem_blks].
646 pci_free_consistent(nic->pdev, PAGE_SIZE,
647 mac_control->fifos[i].
650 mac_control->fifos[i].
654 /* If we got a zero DMA address during allocation,
657 if (mac_control->zerodma_virt_addr) {
658 pci_free_consistent(nic->pdev, PAGE_SIZE,
659 mac_control->zerodma_virt_addr,
662 "%s: Freeing TxDL with zero DMA addr. ",
664 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
665 mac_control->zerodma_virt_addr);
667 kfree(mac_control->fifos[i].list_info);
/* Rx block size depends on buffer mode, as in init_shared_mem(). */
670 #ifndef CONFIG_2BUFF_MODE
671 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
673 size = SIZE_OF_BLOCK;
675 for (i = 0; i < config->rx_ring_num; i++) {
676 blk_cnt = mac_control->rings[i].block_count;
677 for (j = 0; j < blk_cnt; j++) {
678 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
680 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
682 if (tmp_v_addr == NULL)
684 pci_free_consistent(nic->pdev, size,
685 tmp_v_addr, tmp_p_addr);
689 #ifdef CONFIG_2BUFF_MODE
690 /* Freeing buffer storage addresses in 2BUFF mode. */
691 for (i = 0; i < config->rx_ring_num; i++) {
693 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
694 for (j = 0; j < blk_cnt; j++) {
696 if (!mac_control->rings[i].ba[j])
698 while (k != MAX_RXDS_PER_BLOCK) {
699 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
704 kfree(mac_control->rings[i].ba[j]);
706 if (mac_control->rings[i].ba)
707 kfree(mac_control->rings[i].ba);
/* Finally release the DMA-coherent statistics block. */
711 if (mac_control->stats_mem) {
712 pci_free_consistent(nic->pdev,
713 mac_control->stats_mem_sz,
714 mac_control->stats_mem,
715 mac_control->stats_mem_phy);
720 * s2io_verify_pci_mode -
/*
 * Reads the adapter's pci_mode register and rejects unrecognized modes.
 * Returns -1 on an unknown PCI mode; the success-path return (presumably
 * the decoded 'mode') is in lines elided from this listing.
 */
723 static int s2io_verify_pci_mode(nic_t *nic)
725 XENA_dev_config_t __iomem *bar0 = nic->bar0;
726 register u64 val64 = 0;
729 val64 = readq(&bar0->pci_mode);
730 mode = (u8)GET_PCI_MODE(val64);
732 if ( val64 & PCI_MODE_UNKNOWN_MODE)
733 return -1; /* Unknown PCI mode */
739 * s2io_print_pci_mode -
/*
 * Decodes the pci_mode register, logs the bus width and speed, and
 * records the effective bus speed (MHz) in config->bus_speed.  Note the
 * logged MHz is the *effective* clock: PCIX(M2) modes double the base
 * frequency (e.g. M2_66 reports 133MHz).  Break statements and the
 * switch braces are elided in this listing.
 */
741 static int s2io_print_pci_mode(nic_t *nic)
743 XENA_dev_config_t __iomem *bar0 = nic->bar0;
744 register u64 val64 = 0;
746 struct config_param *config = &nic->config;
748 val64 = readq(&bar0->pci_mode);
749 mode = (u8)GET_PCI_MODE(val64);
751 if ( val64 & PCI_MODE_UNKNOWN_MODE)
752 return -1; /* Unknown PCI mode */
754 if (val64 & PCI_MODE_32_BITS) {
755 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
757 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
761 case PCI_MODE_PCI_33:
762 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
763 config->bus_speed = 33;
765 case PCI_MODE_PCI_66:
766 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
767 config->bus_speed = 133;
769 case PCI_MODE_PCIX_M1_66:
770 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
771 config->bus_speed = 133; /* Herc doubles the clock rate */
773 case PCI_MODE_PCIX_M1_100:
774 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
775 config->bus_speed = 200;
777 case PCI_MODE_PCIX_M1_133:
778 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
779 config->bus_speed = 266;
781 case PCI_MODE_PCIX_M2_66:
782 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
783 config->bus_speed = 133;
785 case PCI_MODE_PCIX_M2_100:
786 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
787 config->bus_speed = 200;
789 case PCI_MODE_PCIX_M2_133:
790 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
791 config->bus_speed = 266;
794 return -1; /* Unsupported bus speed */
801 * init_nic - Initialization of hardware
802 * @nic: device peivate variable
803 * Description: The function sequentially configures every block
804 * of the H/W from their reset values.
805 * Return Value: SUCCESS on success and
806 * '-1' on failure (endian settings incorrect).
809 static int init_nic(struct s2io_nic *nic)
811 XENA_dev_config_t __iomem *bar0 = nic->bar0;
812 struct net_device *dev = nic->dev;
813 register u64 val64 = 0;
817 mac_info_t *mac_control;
818 struct config_param *config;
819 int mdio_cnt = 0, dtx_cnt = 0;
820 unsigned long long mem_share;
823 mac_control = &nic->mac_control;
824 config = &nic->config;
826 /* to set the swapper control on the card */
827 if(s2io_set_swapper(nic)) {
828 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
833 * Herc requires EOI to be removed from reset before XGXS, so..
835 if (nic->device_type & XFRAME_II_DEVICE) {
836 val64 = 0xA500000000ULL;
837 writeq(val64, &bar0->sw_reset);
839 val64 = readq(&bar0->sw_reset);
842 /* Remove XGXS from reset state */
844 writeq(val64, &bar0->sw_reset);
846 val64 = readq(&bar0->sw_reset);
848 /* Enable Receiving broadcasts */
849 add = &bar0->mac_cfg;
850 val64 = readq(&bar0->mac_cfg);
851 val64 |= MAC_RMAC_BCAST_ENABLE;
852 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
853 writel((u32) val64, add);
854 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
855 writel((u32) (val64 >> 32), (add + 4));
857 /* Read registers in all blocks */
858 val64 = readq(&bar0->mac_int_mask);
859 val64 = readq(&bar0->mc_int_mask);
860 val64 = readq(&bar0->xgxs_int_mask);
864 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
867 * Configuring the XAUI Interface of Xena.
868 * ***************************************
869 * To Configure the Xena's XAUI, one has to write a series
870 * of 64 bit values into two registers in a particular
871 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
872 * which will be defined in the array of configuration values
873 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
874 * to switch writing from one register to another. We continue
875 * writing these values until we encounter the 'END_SIGN' macro.
876 * For example, After making a series of 21 writes into
877 * dtx_control register the 'SWITCH_SIGN' appears and hence we
878 * start writing into mdio_control until we encounter END_SIGN.
880 if (nic->device_type & XFRAME_II_DEVICE) {
881 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
882 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
883 &bar0->dtx_control, UF);
885 msleep(1); /* Necessary!! */
891 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
892 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
896 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
897 &bar0->dtx_control, UF);
898 val64 = readq(&bar0->dtx_control);
902 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
903 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
907 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
908 &bar0->mdio_control, UF);
909 val64 = readq(&bar0->mdio_control);
912 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
913 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
921 /* Tx DMA Initialization */
923 writeq(val64, &bar0->tx_fifo_partition_0);
924 writeq(val64, &bar0->tx_fifo_partition_1);
925 writeq(val64, &bar0->tx_fifo_partition_2);
926 writeq(val64, &bar0->tx_fifo_partition_3);
929 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
931 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
932 13) | vBIT(config->tx_cfg[i].fifo_priority,
935 if (i == (config->tx_fifo_num - 1)) {
942 writeq(val64, &bar0->tx_fifo_partition_0);
946 writeq(val64, &bar0->tx_fifo_partition_1);
950 writeq(val64, &bar0->tx_fifo_partition_2);
954 writeq(val64, &bar0->tx_fifo_partition_3);
959 /* Enable Tx FIFO partition 0. */
960 val64 = readq(&bar0->tx_fifo_partition_0);
961 val64 |= BIT(0); /* To enable the FIFO partition. */
962 writeq(val64, &bar0->tx_fifo_partition_0);
965 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
966 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
968 if ((nic->device_type == XFRAME_I_DEVICE) &&
969 (get_xena_rev_id(nic->pdev) < 4))
970 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
972 val64 = readq(&bar0->tx_fifo_partition_0);
973 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
974 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
977 * Initialization of Tx_PA_CONFIG register to ignore packet
978 * integrity checking.
980 val64 = readq(&bar0->tx_pa_cfg);
981 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
982 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
983 writeq(val64, &bar0->tx_pa_cfg);
985 /* Rx DMA intialization. */
987 for (i = 0; i < config->rx_ring_num; i++) {
989 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
992 writeq(val64, &bar0->rx_queue_priority);
995 * Allocating equal share of memory to all the
999 if (nic->device_type & XFRAME_II_DEVICE)
1004 for (i = 0; i < config->rx_ring_num; i++) {
1007 mem_share = (mem_size / config->rx_ring_num +
1008 mem_size % config->rx_ring_num);
1009 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1012 mem_share = (mem_size / config->rx_ring_num);
1013 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1016 mem_share = (mem_size / config->rx_ring_num);
1017 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1020 mem_share = (mem_size / config->rx_ring_num);
1021 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1024 mem_share = (mem_size / config->rx_ring_num);
1025 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1028 mem_share = (mem_size / config->rx_ring_num);
1029 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1032 mem_share = (mem_size / config->rx_ring_num);
1033 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1036 mem_share = (mem_size / config->rx_ring_num);
1037 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1041 writeq(val64, &bar0->rx_queue_cfg);
1044 * Filling Tx round robin registers
1045 * as per the number of FIFOs
1047 switch (config->tx_fifo_num) {
1049 val64 = 0x0000000000000000ULL;
1050 writeq(val64, &bar0->tx_w_round_robin_0);
1051 writeq(val64, &bar0->tx_w_round_robin_1);
1052 writeq(val64, &bar0->tx_w_round_robin_2);
1053 writeq(val64, &bar0->tx_w_round_robin_3);
1054 writeq(val64, &bar0->tx_w_round_robin_4);
1057 val64 = 0x0000010000010000ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_0);
1059 val64 = 0x0100000100000100ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_1);
1061 val64 = 0x0001000001000001ULL;
1062 writeq(val64, &bar0->tx_w_round_robin_2);
1063 val64 = 0x0000010000010000ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_3);
1065 val64 = 0x0100000000000000ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_4);
1069 val64 = 0x0001000102000001ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_0);
1071 val64 = 0x0001020000010001ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_1);
1073 val64 = 0x0200000100010200ULL;
1074 writeq(val64, &bar0->tx_w_round_robin_2);
1075 val64 = 0x0001000102000001ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_3);
1077 val64 = 0x0001020000000000ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_4);
1081 val64 = 0x0001020300010200ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_0);
1083 val64 = 0x0100000102030001ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_1);
1085 val64 = 0x0200010000010203ULL;
1086 writeq(val64, &bar0->tx_w_round_robin_2);
1087 val64 = 0x0001020001000001ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_3);
1089 val64 = 0x0203000100000000ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_4);
1093 val64 = 0x0001000203000102ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_0);
1095 val64 = 0x0001020001030004ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_1);
1097 val64 = 0x0001000203000102ULL;
1098 writeq(val64, &bar0->tx_w_round_robin_2);
1099 val64 = 0x0001020001030004ULL;
1100 writeq(val64, &bar0->tx_w_round_robin_3);
1101 val64 = 0x0001000000000000ULL;
1102 writeq(val64, &bar0->tx_w_round_robin_4);
1105 val64 = 0x0001020304000102ULL;
1106 writeq(val64, &bar0->tx_w_round_robin_0);
1107 val64 = 0x0304050001020001ULL;
1108 writeq(val64, &bar0->tx_w_round_robin_1);
1109 val64 = 0x0203000100000102ULL;
1110 writeq(val64, &bar0->tx_w_round_robin_2);
1111 val64 = 0x0304000102030405ULL;
1112 writeq(val64, &bar0->tx_w_round_robin_3);
1113 val64 = 0x0001000200000000ULL;
1114 writeq(val64, &bar0->tx_w_round_robin_4);
1117 val64 = 0x0001020001020300ULL;
1118 writeq(val64, &bar0->tx_w_round_robin_0);
1119 val64 = 0x0102030400010203ULL;
1120 writeq(val64, &bar0->tx_w_round_robin_1);
1121 val64 = 0x0405060001020001ULL;
1122 writeq(val64, &bar0->tx_w_round_robin_2);
1123 val64 = 0x0304050000010200ULL;
1124 writeq(val64, &bar0->tx_w_round_robin_3);
1125 val64 = 0x0102030000000000ULL;
1126 writeq(val64, &bar0->tx_w_round_robin_4);
1129 val64 = 0x0001020300040105ULL;
1130 writeq(val64, &bar0->tx_w_round_robin_0);
1131 val64 = 0x0200030106000204ULL;
1132 writeq(val64, &bar0->tx_w_round_robin_1);
1133 val64 = 0x0103000502010007ULL;
1134 writeq(val64, &bar0->tx_w_round_robin_2);
1135 val64 = 0x0304010002060500ULL;
1136 writeq(val64, &bar0->tx_w_round_robin_3);
1137 val64 = 0x0103020400000000ULL;
1138 writeq(val64, &bar0->tx_w_round_robin_4);
1142 /* Filling the Rx round robin registers as per the
1143 * number of Rings and steering based on QoS.
1145 switch (config->rx_ring_num) {
1147 val64 = 0x8080808080808080ULL;
1148 writeq(val64, &bar0->rts_qos_steering);
1151 val64 = 0x0000010000010000ULL;
1152 writeq(val64, &bar0->rx_w_round_robin_0);
1153 val64 = 0x0100000100000100ULL;
1154 writeq(val64, &bar0->rx_w_round_robin_1);
1155 val64 = 0x0001000001000001ULL;
1156 writeq(val64, &bar0->rx_w_round_robin_2);
1157 val64 = 0x0000010000010000ULL;
1158 writeq(val64, &bar0->rx_w_round_robin_3);
1159 val64 = 0x0100000000000000ULL;
1160 writeq(val64, &bar0->rx_w_round_robin_4);
1162 val64 = 0x8080808040404040ULL;
1163 writeq(val64, &bar0->rts_qos_steering);
1166 val64 = 0x0001000102000001ULL;
1167 writeq(val64, &bar0->rx_w_round_robin_0);
1168 val64 = 0x0001020000010001ULL;
1169 writeq(val64, &bar0->rx_w_round_robin_1);
1170 val64 = 0x0200000100010200ULL;
1171 writeq(val64, &bar0->rx_w_round_robin_2);
1172 val64 = 0x0001000102000001ULL;
1173 writeq(val64, &bar0->rx_w_round_robin_3);
1174 val64 = 0x0001020000000000ULL;
1175 writeq(val64, &bar0->rx_w_round_robin_4);
1177 val64 = 0x8080804040402020ULL;
1178 writeq(val64, &bar0->rts_qos_steering);
1181 val64 = 0x0001020300010200ULL;
1182 writeq(val64, &bar0->rx_w_round_robin_0);
1183 val64 = 0x0100000102030001ULL;
1184 writeq(val64, &bar0->rx_w_round_robin_1);
1185 val64 = 0x0200010000010203ULL;
1186 writeq(val64, &bar0->rx_w_round_robin_2);
1187 val64 = 0x0001020001000001ULL;
1188 writeq(val64, &bar0->rx_w_round_robin_3);
1189 val64 = 0x0203000100000000ULL;
1190 writeq(val64, &bar0->rx_w_round_robin_4);
1192 val64 = 0x8080404020201010ULL;
1193 writeq(val64, &bar0->rts_qos_steering);
1196 val64 = 0x0001000203000102ULL;
1197 writeq(val64, &bar0->rx_w_round_robin_0);
1198 val64 = 0x0001020001030004ULL;
1199 writeq(val64, &bar0->rx_w_round_robin_1);
1200 val64 = 0x0001000203000102ULL;
1201 writeq(val64, &bar0->rx_w_round_robin_2);
1202 val64 = 0x0001020001030004ULL;
1203 writeq(val64, &bar0->rx_w_round_robin_3);
1204 val64 = 0x0001000000000000ULL;
1205 writeq(val64, &bar0->rx_w_round_robin_4);
1207 val64 = 0x8080404020201008ULL;
1208 writeq(val64, &bar0->rts_qos_steering);
1211 val64 = 0x0001020304000102ULL;
1212 writeq(val64, &bar0->rx_w_round_robin_0);
1213 val64 = 0x0304050001020001ULL;
1214 writeq(val64, &bar0->rx_w_round_robin_1);
1215 val64 = 0x0203000100000102ULL;
1216 writeq(val64, &bar0->rx_w_round_robin_2);
1217 val64 = 0x0304000102030405ULL;
1218 writeq(val64, &bar0->rx_w_round_robin_3);
1219 val64 = 0x0001000200000000ULL;
1220 writeq(val64, &bar0->rx_w_round_robin_4);
1222 val64 = 0x8080404020100804ULL;
1223 writeq(val64, &bar0->rts_qos_steering);
1226 val64 = 0x0001020001020300ULL;
1227 writeq(val64, &bar0->rx_w_round_robin_0);
1228 val64 = 0x0102030400010203ULL;
1229 writeq(val64, &bar0->rx_w_round_robin_1);
1230 val64 = 0x0405060001020001ULL;
1231 writeq(val64, &bar0->rx_w_round_robin_2);
1232 val64 = 0x0304050000010200ULL;
1233 writeq(val64, &bar0->rx_w_round_robin_3);
1234 val64 = 0x0102030000000000ULL;
1235 writeq(val64, &bar0->rx_w_round_robin_4);
1237 val64 = 0x8080402010080402ULL;
1238 writeq(val64, &bar0->rts_qos_steering);
1241 val64 = 0x0001020300040105ULL;
1242 writeq(val64, &bar0->rx_w_round_robin_0);
1243 val64 = 0x0200030106000204ULL;
1244 writeq(val64, &bar0->rx_w_round_robin_1);
1245 val64 = 0x0103000502010007ULL;
1246 writeq(val64, &bar0->rx_w_round_robin_2);
1247 val64 = 0x0304010002060500ULL;
1248 writeq(val64, &bar0->rx_w_round_robin_3);
1249 val64 = 0x0103020400000000ULL;
1250 writeq(val64, &bar0->rx_w_round_robin_4);
1252 val64 = 0x8040201008040201ULL;
1253 writeq(val64, &bar0->rts_qos_steering);
1259 for (i = 0; i < 8; i++)
1260 writeq(val64, &bar0->rts_frm_len_n[i]);
1262 /* Set the default rts frame length for the rings configured */
1263 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1264 for (i = 0 ; i < config->rx_ring_num ; i++)
1265 writeq(val64, &bar0->rts_frm_len_n[i]);
1267 /* Set the frame length for the configured rings
1268 * desired by the user
1270 for (i = 0; i < config->rx_ring_num; i++) {
1271 /* If rts_frm_len[i] == 0 then it is assumed that user not
1272 * specified frame length steering.
1273 * If the user provides the frame length then program
1274 * the rts_frm_len register for those values or else
1275 * leave it as it is.
1277 if (rts_frm_len[i] != 0) {
1278 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1279 &bar0->rts_frm_len_n[i]);
1283 /* Program statistics memory */
1284 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1286 if (nic->device_type == XFRAME_II_DEVICE) {
1287 val64 = STAT_BC(0x320);
1288 writeq(val64, &bar0->stat_byte_cnt);
1292 * Initializing the sampling rate for the device to calculate the
1293 * bandwidth utilization.
1295 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1296 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1297 writeq(val64, &bar0->mac_link_util);
1301 * Initializing the Transmit and Receive Traffic Interrupt
1305 * TTI Initialization. Default Tx timer gets us about
1306 * 250 interrupts per sec. Continuous interrupts are enabled
1309 if (nic->device_type == XFRAME_II_DEVICE) {
1310 int count = (nic->config.bus_speed * 125)/2;
1311 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1314 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1316 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1317 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1318 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1319 if (use_continuous_tx_intrs)
1320 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1321 writeq(val64, &bar0->tti_data1_mem);
1323 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1324 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1325 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1326 writeq(val64, &bar0->tti_data2_mem);
1328 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1329 writeq(val64, &bar0->tti_command_mem);
1332 * Once the operation completes, the Strobe bit of the command
1333 * register will be reset. We poll for this particular condition
1334 * We wait for a maximum of 500ms for the operation to complete,
1335 * if it's not complete by then we return error.
1339 val64 = readq(&bar0->tti_command_mem);
1340 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1344 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1352 if (nic->config.bimodal) {
1354 for (k = 0; k < config->rx_ring_num; k++) {
1355 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1356 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1357 writeq(val64, &bar0->tti_command_mem);
1360 * Once the operation completes, the Strobe bit of the command
1361 * register will be reset. We poll for this particular condition
1362 * We wait for a maximum of 500ms for the operation to complete,
1363 * if it's not complete by then we return error.
1367 val64 = readq(&bar0->tti_command_mem);
1368 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1373 "%s: TTI init Failed\n",
1383 /* RTI Initialization */
1384 if (nic->device_type == XFRAME_II_DEVICE) {
1386 * Programmed to generate Apprx 500 Intrs per
1389 int count = (nic->config.bus_speed * 125)/4;
1390 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1392 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1394 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1395 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1396 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1398 writeq(val64, &bar0->rti_data1_mem);
1400 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1401 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1402 if (nic->intr_type == MSI_X)
1403 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1404 RTI_DATA2_MEM_RX_UFC_D(0x40));
1406 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1407 RTI_DATA2_MEM_RX_UFC_D(0x80));
1408 writeq(val64, &bar0->rti_data2_mem);
1410 for (i = 0; i < config->rx_ring_num; i++) {
1411 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1412 | RTI_CMD_MEM_OFFSET(i);
1413 writeq(val64, &bar0->rti_command_mem);
1416 * Once the operation completes, the Strobe bit of the
1417 * command register will be reset. We poll for this
1418 * particular condition. We wait for a maximum of 500ms
1419 * for the operation to complete, if it's not complete
1420 * by then we return error.
1424 val64 = readq(&bar0->rti_command_mem);
1425 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1429 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1440 * Initializing proper values as Pause threshold into all
1441 * the 8 Queues on Rx side.
1443 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1444 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1446 /* Disable RMAC PAD STRIPPING */
1447 add = &bar0->mac_cfg;
1448 val64 = readq(&bar0->mac_cfg);
1449 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1450 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1451 writel((u32) (val64), add);
1452 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1453 writel((u32) (val64 >> 32), (add + 4));
1454 val64 = readq(&bar0->mac_cfg);
1457 * Set the time value to be inserted in the pause frame
1458 * generated by xena.
1460 val64 = readq(&bar0->rmac_pause_cfg);
1461 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1462 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1463 writeq(val64, &bar0->rmac_pause_cfg);
1466 * Set the Threshold Limit for Generating the pause frame
1467 * If the amount of data in any Queue exceeds ratio of
1468 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1469 * pause frame is generated
1472 for (i = 0; i < 4; i++) {
1474 (((u64) 0xFF00 | nic->mac_control.
1475 mc_pause_threshold_q0q3)
1478 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1481 for (i = 0; i < 4; i++) {
1483 (((u64) 0xFF00 | nic->mac_control.
1484 mc_pause_threshold_q4q7)
1487 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1490 * TxDMA will stop Read request if the number of read split has
1491 * exceeded the limit pointed by shared_splits
1493 val64 = readq(&bar0->pic_control);
1494 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1495 writeq(val64, &bar0->pic_control);
1498 * Programming the Herc to split every write transaction
1499 * that does not start on an ADB to reduce disconnects.
1501 if (nic->device_type == XFRAME_II_DEVICE) {
1502 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1503 writeq(val64, &bar0->wreq_split_mask);
1506 /* Setting Link stability period to 64 ms */
1507 if (nic->device_type == XFRAME_II_DEVICE) {
1508 val64 = MISC_LINK_STABILITY_PRD(3);
1509 writeq(val64, &bar0->misc_control);
1514 #define LINK_UP_DOWN_INTERRUPT 1
1515 #define MAC_RMAC_ERR_TIMER 2
/*
 * s2io_link_fault_indication - select the link-fault detection mechanism.
 * Returns MAC_RMAC_ERR_TIMER for any non-INTA interrupt mode;
 * for Xframe-II under INTA it returns LINK_UP_DOWN_INTERRUPT (GPIO-based
 * link interrupt); every other case falls back to the RMAC error timer.
 * NOTE(review): braces appear to have been lost in extraction here --
 * verify against pristine source.
 */
1517 int s2io_link_fault_indication(nic_t *nic)
1519 	if (nic->intr_type != INTA)
1520 		return MAC_RMAC_ERR_TIMER;
1521 	if (nic->device_type == XFRAME_II_DEVICE)
1522 		return LINK_UP_DOWN_INTERRUPT;
1524 	return MAC_RMAC_ERR_TIMER;
1528 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1529 * @nic: device private variable,
1530 * @mask: A mask indicating which Intr block must be modified and,
1531 * @flag: A flag indicating whether to enable or disable the Intrs.
1532 * Description: This function will either disable or enable the interrupts
1533 * depending on the flag argument. The mask argument can be used to
1534 * enable/disable any Intr block.
1535 * Return Value: NONE.
/*
 * en_dis_able_nic_intrs - enable/disable the interrupt blocks selected by
 * @mask (PIC, Tx/Rx DMA, MAC, XGXS, MC, Tx and Rx traffic).  For each
 * selected block: on ENABLE_INTRS the block's top-level bit(s) are cleared
 * in general_int_mask and the block-level masks are programmed to allow
 * only the interrupts the driver cares about; on DISABLE_INTRS the block
 * is masked off wholesale with DISABLE_ALL_INTRS.
 * NOTE(review): this text has lost lines in extraction (several closing
 * braces, and the accumulation of the block bit into val64 before the
 * general_int_mask writes in the disable paths) -- verify against the
 * pristine source before building.
 */
1538 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1540 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1541 register u64 val64 = 0, temp64 = 0;
1543 /* Top level interrupt classification */
1544 /* PIC Interrupts */
1545 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1546 /* Enable PIC Intrs in the general intr mask register */
1547 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1548 if (flag == ENABLE_INTRS) {
1549 temp64 = readq(&bar0->general_int_mask);
1550 temp64 &= ~((u64) val64);
1551 writeq(temp64, &bar0->general_int_mask);
1553 * If Hercules adapter enable GPIO otherwise
1554 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1555 * interrupts for now.
/* GPIO link-up interrupt is unmasked only when the link fault is
 * reported through the GPIO interrupt (see s2io_link_fault_indication). */
1558 if (s2io_link_fault_indication(nic) ==
1559 LINK_UP_DOWN_INTERRUPT ) {
1560 temp64 = readq(&bar0->pic_int_mask);
1561 temp64 &= ~((u64) PIC_INT_GPIO);
1562 writeq(temp64, &bar0->pic_int_mask);
1563 temp64 = readq(&bar0->gpio_int_mask);
1564 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1565 writeq(temp64, &bar0->gpio_int_mask);
1567 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1570 * No MSI Support is available presently, so TTI and
1571 * RTI interrupts are also disabled.
1573 } else if (flag == DISABLE_INTRS) {
1575 * Disable PIC Intrs in the general
1576 * intr mask register
1578 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1579 temp64 = readq(&bar0->general_int_mask);
1581 writeq(val64, &bar0->general_int_mask);
1585 /* DMA Interrupts */
1586 /* Enabling/Disabling Tx DMA interrupts */
1587 if (mask & TX_DMA_INTR) {
1588 /* Enable TxDMA Intrs in the general intr mask register */
1589 val64 = TXDMA_INT_M;
1590 if (flag == ENABLE_INTRS) {
1591 temp64 = readq(&bar0->general_int_mask);
1592 temp64 &= ~((u64) val64);
1593 writeq(temp64, &bar0->general_int_mask);
1595 * Keep all interrupts other than PFC interrupt
1596 * and PCC interrupt disabled in DMA level.
1598 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1600 writeq(val64, &bar0->txdma_int_mask);
1602 * Enable only the MISC error 1 interrupt in PFC block
1604 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1605 writeq(val64, &bar0->pfc_err_mask);
1607 * Enable only the FB_ECC error interrupt in PCC block
1609 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1610 writeq(val64, &bar0->pcc_err_mask);
1611 } else if (flag == DISABLE_INTRS) {
1613 * Disable TxDMA Intrs in the general intr mask
1616 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1617 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1618 temp64 = readq(&bar0->general_int_mask);
1620 writeq(val64, &bar0->general_int_mask);
1624 /* Enabling/Disabling Rx DMA interrupts */
1625 if (mask & RX_DMA_INTR) {
1626 /* Enable RxDMA Intrs in the general intr mask register */
1627 val64 = RXDMA_INT_M;
1628 if (flag == ENABLE_INTRS) {
1629 temp64 = readq(&bar0->general_int_mask);
1630 temp64 &= ~((u64) val64);
1631 writeq(temp64, &bar0->general_int_mask);
1633 * All RxDMA block interrupts are disabled for now
1636 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1637 } else if (flag == DISABLE_INTRS) {
1639 * Disable RxDMA Intrs in the general intr mask
1642 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1643 temp64 = readq(&bar0->general_int_mask);
1645 writeq(val64, &bar0->general_int_mask);
1649 /* MAC Interrupts */
1650 /* Enabling/Disabling MAC interrupts */
1651 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1652 val64 = TXMAC_INT_M | RXMAC_INT_M;
1653 if (flag == ENABLE_INTRS) {
1654 temp64 = readq(&bar0->general_int_mask);
1655 temp64 &= ~((u64) val64);
1656 writeq(temp64, &bar0->general_int_mask);
1658 * All MAC block error interrupts are disabled for now
1661 } else if (flag == DISABLE_INTRS) {
1663 * Disable MAC Intrs in the general intr mask register
1665 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1666 writeq(DISABLE_ALL_INTRS,
1667 &bar0->mac_rmac_err_mask);
1669 temp64 = readq(&bar0->general_int_mask);
1671 writeq(val64, &bar0->general_int_mask);
1675 /* XGXS Interrupts */
1676 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1677 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1678 if (flag == ENABLE_INTRS) {
1679 temp64 = readq(&bar0->general_int_mask);
1680 temp64 &= ~((u64) val64);
1681 writeq(temp64, &bar0->general_int_mask);
1683 * All XGXS block error interrupts are disabled for now
1686 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1687 } else if (flag == DISABLE_INTRS) {
/* NOTE(review): comment says "MC Intrs" but this is the XGXS branch. */
1689 * Disable MC Intrs in the general intr mask register
1691 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1692 temp64 = readq(&bar0->general_int_mask);
1694 writeq(val64, &bar0->general_int_mask);
1698 /* Memory Controller(MC) interrupts */
1699 if (mask & MC_INTR) {
/* NOTE(review): the line assigning val64 = MC_INT_M appears to have been
 * lost here; as shown, val64 still holds the previous block's mask. */
1701 if (flag == ENABLE_INTRS) {
1702 temp64 = readq(&bar0->general_int_mask);
1703 temp64 &= ~((u64) val64);
1704 writeq(temp64, &bar0->general_int_mask);
1706 * Enable all MC Intrs.
1708 writeq(0x0, &bar0->mc_int_mask);
1709 writeq(0x0, &bar0->mc_err_mask);
1710 } else if (flag == DISABLE_INTRS) {
1712 * Disable MC Intrs in the general intr mask register
1714 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1715 temp64 = readq(&bar0->general_int_mask);
1717 writeq(val64, &bar0->general_int_mask);
1722 /* Tx traffic interrupts */
1723 if (mask & TX_TRAFFIC_INTR) {
1724 val64 = TXTRAFFIC_INT_M;
1725 if (flag == ENABLE_INTRS) {
1726 temp64 = readq(&bar0->general_int_mask);
1727 temp64 &= ~((u64) val64);
1728 writeq(temp64, &bar0->general_int_mask);
1730 * Enable all the Tx side interrupts
1731 * writing 0 Enables all 64 TX interrupt levels
1733 writeq(0x0, &bar0->tx_traffic_mask);
1734 } else if (flag == DISABLE_INTRS) {
1736 * Disable Tx Traffic Intrs in the general intr mask
1739 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1740 temp64 = readq(&bar0->general_int_mask);
1742 writeq(val64, &bar0->general_int_mask);
1746 /* Rx traffic interrupts */
1747 if (mask & RX_TRAFFIC_INTR) {
1748 val64 = RXTRAFFIC_INT_M;
1749 if (flag == ENABLE_INTRS) {
1750 temp64 = readq(&bar0->general_int_mask);
1751 temp64 &= ~((u64) val64);
1752 writeq(temp64, &bar0->general_int_mask);
1753 /* writing 0 Enables all 8 RX interrupt levels */
1754 writeq(0x0, &bar0->rx_traffic_mask);
1755 } else if (flag == DISABLE_INTRS) {
1757 * Disable Rx Traffic Intrs in the general intr mask
1760 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1761 temp64 = readq(&bar0->general_int_mask);
1763 writeq(val64, &bar0->general_int_mask);
/*
 * check_prc_pcc_state - helper for verify_xena_quiescence().
 * Examines the RMAC PCC idle and RC PRC quiescent bits of the adapter
 * status value @val64.  @flag is FALSE when the adapter-enable bit has
 * never been written; the expected bit pattern differs in that case.
 * Xena revisions >= 4 and all Herc (@herc) devices check
 * ADAPTER_STATUS_RMAC_PCC_IDLE; older Xena checks RMAC_PCC_FOUR_IDLE.
 * NOTE(review): extraction has dropped lines here (the result
 * assignments inside the matched conditions, braces and the final
 * return) -- verify against pristine source.
 */
1768 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1772 if (flag == FALSE) {
1773 if ((!herc && (rev_id >= 4)) || herc) {
1774 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1775 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1776 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1780 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1781 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1782 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1787 if ((!herc && (rev_id >= 4)) || herc) {
1788 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1789 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1790 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1791 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1792 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1796 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1797 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1798 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1799 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1800 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1809 * verify_xena_quiescence - Checks whether the H/W is ready
1810 * @val64 : Value read from adapter status register.
1811 * @flag : indicates if the adapter enable bit was ever written once
1813 * Description: Returns whether the H/W is ready to go or not. Depending
1814 * on whether adapter enable bit was written or not the comparison
1815 * differs and the calling function passes the input argument flag to
1817 * Return: 1 if Xena is quiescent,
1818 * 0 if Xena is not quiescent.
/*
 * verify_xena_quiescence - check whether the hardware is ready to be
 * enabled.  Determines the device revision and whether this is a Herc
 * (Xframe-II) part, verifies the readiness bits of the adapter status
 * (TDMA/RDMA/PFC ready, TMAC buffer empty, PIC quiescent, MC DRAM and
 * queues ready, PLL locks), and delegates the PCC/PRC state check to
 * check_prc_pcc_state().  @flag indicates whether the adapter enable
 * bit was ever written, which changes the expected PCC/PRC pattern.
 * NOTE(review): extraction has dropped lines (declarations of ret/herc,
 * the condition using tmp64, and the final return) -- verify against
 * pristine source.
 */
1821 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1824 u64 tmp64 = ~((u64) val64);
1825 int rev_id = get_xena_rev_id(sp->pdev);
1827 herc = (sp->device_type == XFRAME_II_DEVICE);
1830 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1831 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1832 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1833 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1834 ADAPTER_STATUS_P_PLL_LOCK))) {
1835 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1842 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1843 * @sp: Pointer to device specific structure
1845 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * fix_mac_address - Alpha-platform workaround: streams the fix_mac[]
 * table of magic values into gpio_control until the END_SIGN sentinel,
 * reading the register back after each write to flush the posted write.
 * NOTE(review): local declarations (val64, i) were lost in extraction.
 */
1849 void fix_mac_address(nic_t * sp)
1851 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1855 while (fix_mac[i] != END_SIGN) {
1856 writeq(fix_mac[i++], &bar0->gpio_control);
1858 val64 = readq(&bar0->gpio_control);
1863 * start_nic - Turns the device on
1864 * @nic : device private variable.
1866 * This function actually turns the device on. Before this function is
1867 * called,all Registers are configured from their reset states
1868 * and shared memory is allocated but the NIC is still quiescent. On
1869 * calling this function, the device interrupts are cleared and the NIC is
1870 * literally switched on by writing into the adapter control register.
1872 * SUCCESS on success and -1 on failure.
/*
 * start_nic - bring the already-configured, quiescent device on line:
 * program the PRC (per-ring receive controller) with the first Rx block
 * DMA address and enable it, enable MC-RLDRAM, clear stale link-change
 * interrupts, verify quiescence, enable the selected interrupt blocks,
 * turn the laser on (ADAPTER_EOI_TX_ON), apply the SXE-002 LED
 * workaround for newer Xframe-I subsystems, and schedule the link-state
 * task.  Returns SUCCESS or a failure code (error paths partly lost in
 * extraction).
 */
1875 static int start_nic(struct s2io_nic *nic)
1877 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1878 struct net_device *dev = nic->dev;
1879 register u64 val64 = 0;
1882 mac_info_t *mac_control;
1883 struct config_param *config;
1885 mac_control = &nic->mac_control;
1886 config = &nic->config;
1888 /* PRC Initialization and configuration */
1889 for (i = 0; i < config->rx_ring_num; i++) {
1890 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1891 &bar0->prc_rxd0_n[i]);
1893 val64 = readq(&bar0->prc_ctrl_n[i]);
1894 if (nic->config.bimodal)
1895 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1896 #ifndef CONFIG_2BUFF_MODE
1897 val64 |= PRC_CTRL_RC_ENABLED;
1899 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1901 writeq(val64, &bar0->prc_ctrl_n[i]);
1904 #ifdef CONFIG_2BUFF_MODE
1905 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1906 val64 = readq(&bar0->rx_pa_cfg);
1907 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1908 writeq(val64, &bar0->rx_pa_cfg);
1912 * Enabling MC-RLDRAM. After enabling the device, we timeout
1913 * for around 100ms, which is approximately the time required
1914 * for the device to be ready for operation.
1916 val64 = readq(&bar0->mc_rldram_mrs);
1917 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1918 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1919 val64 = readq(&bar0->mc_rldram_mrs);
1921 msleep(100); /* Delay by around 100 ms. */
1923 /* NOTE(review): comment said "Enabling ECC Protection" but the code
 * below clears the ADAPTER_ECC_EN bit -- confirm the intended polarity
 * of this bit against the hardware manual. */
1924 val64 = readq(&bar0->adapter_control);
1925 val64 &= ~ADAPTER_ECC_EN;
1926 writeq(val64, &bar0->adapter_control);
1929 * Clearing any possible Link state change interrupts that
1930 * could have popped up just before Enabling the card.
1932 val64 = readq(&bar0->mac_rmac_err_reg);
/* write-1-to-clear: writing the value back acknowledges pending bits */
1934 writeq(val64, &bar0->mac_rmac_err_reg);
1937 * Verify if the device is ready to be enabled, if so enable
1940 val64 = readq(&bar0->adapter_status);
1941 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1942 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1943 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1944 (unsigned long long) val64);
1948 /* Enable select interrupts */
1949 if (nic->intr_type != INTA)
1950 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
1952 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1953 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1954 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1955 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1959 * With some switches, link might be already up at this point.
1960 * Because of this weird behavior, when we enable laser,
1961 * we may not get link. We need to handle this. We cannot
1962 * figure out which switch is misbehaving. So we are forced to
1963 * make a global change.
1966 /* Enabling Laser. */
1967 val64 = readq(&bar0->adapter_control);
1968 val64 |= ADAPTER_EOI_TX_ON;
1969 writeq(val64, &bar0->adapter_control);
1971 /* SXE-002: Initialize link and activity LED */
1972 subid = nic->pdev->subsystem_device;
1973 if (((subid & 0xFF) >= 0x07) &&
1974 (nic->device_type == XFRAME_I_DEVICE)) {
1975 val64 = readq(&bar0->gpio_control);
1976 val64 |= 0x0000800000000000ULL;
1977 writeq(val64, &bar0->gpio_control);
1978 val64 = 0x0411040400000000ULL;
/* undocumented register at BAR0 + 0x2700, per errata SXE-002 */
1979 writeq(val64, (void __iomem *)bar0 + 0x2700);
1983 * Don't see link state interrupts on certain switches, so
1984 * directly scheduling a link state task from here.
1986 schedule_work(&nic->set_link_task);
1992 * free_tx_buffers - Free all queued Tx buffers
1993 * @nic : device private variable.
1995 * Free all queued Tx buffers.
1996 * Return Value: void
/*
 * free_tx_buffers - forcibly reclaim every queued Tx skb.  For each FIFO
 * it walks the descriptor list, unmaps the linear part (pci_unmap_single)
 * and each fragment (pci_unmap_page), zeroes the TxD chain, frees the
 * skb, and resets the FIFO's get/put offsets to 0.
 * NOTE(review): extraction dropped several lines here (skb extraction,
 * continue-on-NULL handling, dev_kfree_skb and the cnt++ / DBG_PRINT
 * prefix around the format string) -- verify against pristine source.
 */
1999 static void free_tx_buffers(struct s2io_nic *nic)
2001 struct net_device *dev = nic->dev;
2002 struct sk_buff *skb;
2005 mac_info_t *mac_control;
2006 struct config_param *config;
2007 int cnt = 0, frg_cnt;
2009 mac_control = &nic->mac_control;
2010 config = &nic->config;
2012 for (i = 0; i < config->tx_fifo_num; i++) {
2013 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2014 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2017 (struct sk_buff *) ((unsigned long) txdp->
2020 memset(txdp, 0, sizeof(TxD_t) *
2024 frg_cnt = skb_shinfo(skb)->nr_frags;
2025 pci_unmap_single(nic->pdev, (dma_addr_t)
2026 txdp->Buffer_Pointer,
2027 skb->len - skb->data_len,
2033 for (j = 0; j < frg_cnt; j++, txdp++) {
2035 &skb_shinfo(skb)->frags[j];
2036 pci_unmap_page(nic->pdev,
2046 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2050 "%s:forcibly freeing %d skbs on FIFO%d\n",
2052 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2053 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2058 * stop_nic - To stop the nic
2059 * @nic : device private variable.
2061 * This function does exactly the opposite of what the start_nic()
2062 * function does. This function is called to stop the device.
/*
 * stop_nic - reverse of start_nic(): masks the same interrupt blocks
 * that start_nic() enabled (traffic, PIC, MAC), then clears
 * PRC_CTRL_RC_ENABLED on every configured receive ring so the PRC
 * engines stop fetching descriptors.
 */
2067 static void stop_nic(struct s2io_nic *nic)
2069 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2070 register u64 val64 = 0;
2071 u16 interruptible, i;
2072 mac_info_t *mac_control;
2073 struct config_param *config;
2075 mac_control = &nic->mac_control;
2076 config = &nic->config;
2078 /* Disable all interrupts */
2079 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2080 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2081 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2082 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2085 for (i = 0; i < config->rx_ring_num; i++) {
2086 val64 = readq(&bar0->prc_ctrl_n[i]);
2087 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2088 writeq(val64, &bar0->prc_ctrl_n[i]);
2093 * fill_rx_buffers - Allocates the Rx side skbs
2094 * @nic: device private variable
2095 * @ring_no: ring number
2097 * The function allocates Rx side skbs and puts the physical
2098 * address of these buffers into the RxD buffer pointers, so that the NIC
2099 * can DMA the received frame into these locations.
2100 * The NIC supports 3 receive modes, viz
2102 * 2. three buffer and
2103 * 3. Five buffer modes.
2104 * Each mode defines how many fragments the received frame will be split
2105 * up into by the NIC. The frame is split into L3 header, L4 Header,
2106 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2107 * is split into 3 fragments. As of now only single buffer mode is
2110 * SUCCESS on success or an appropriate -ve value on failure.
/*
 * fill_rx_buffers - replenish ring @ring_no with freshly allocated skbs:
 * computes how many RxDs need buffers (pkt_cnt minus rx_bufs_left),
 * walks the put pointer through the descriptor blocks (wrapping at
 * END_OF_BLOCK markers), allocates and DMA-maps an skb per descriptor,
 * and hands ownership to the adapter via RXD_OWN_XENA -- batched every
 * (1 << rxsync_frequency) descriptors, with a memory barrier before the
 * first descriptor of each batch is released (see the tail of the loop).
 * Compile-time variants: CONFIG_2BUFF_MODE (three-pointer descriptors
 * with the ba_0/ba_1 header buffers) vs single-buffer mode, and
 * non-NAPI builds additionally track put_pos under put_lock.
 * NOTE(review): extraction dropped many lines (declarations of rxdp/
 * alloc_tab/alloc_cnt, #else/#endif markers, break/return statements,
 * and the mmiowb/wmb barriers referred to by the trailing comment) --
 * verify against pristine source.
 */
2113 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2115 struct net_device *dev = nic->dev;
2116 struct sk_buff *skb;
2118 int off, off1, size, block_no, block_no1;
2119 int offset, offset1;
2122 mac_info_t *mac_control;
2123 struct config_param *config;
2124 #ifdef CONFIG_2BUFF_MODE
2129 dma_addr_t rxdpphys;
2131 #ifndef CONFIG_S2IO_NAPI
2132 unsigned long flags;
2134 RxD_t *first_rxdp = NULL;
2136 mac_control = &nic->mac_control;
2137 config = &nic->config;
2138 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2139 atomic_read(&nic->rx_bufs_left[ring_no]);
/* worst-case frame: MTU plus Ethernet/802.2/SNAP header overhead */
2140 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2141 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2143 while (alloc_tab < alloc_cnt) {
2144 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2146 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2148 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2149 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2150 #ifndef CONFIG_2BUFF_MODE
2151 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2152 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2154 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2155 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2158 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2159 block_virt_addr + off;
/* put caught up with get and the descriptor is still busy: ring full */
2160 if ((offset == offset1) && (rxdp->Host_Control)) {
2161 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2162 DBG_PRINT(INTR_DBG, " info equated\n");
2165 #ifndef CONFIG_2BUFF_MODE
2166 if (rxdp->Control_1 == END_OF_BLOCK) {
2167 mac_control->rings[ring_no].rx_curr_put_info.
2169 mac_control->rings[ring_no].rx_curr_put_info.
2170 block_index %= mac_control->rings[ring_no].block_count;
2171 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2174 off %= (MAX_RXDS_PER_BLOCK + 1);
2175 mac_control->rings[ring_no].rx_curr_put_info.offset =
/* in 1-buff mode the end-of-block descriptor's Control_2 holds the
 * virtual address of the next block's first RxD */
2177 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2178 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2181 #ifndef CONFIG_S2IO_NAPI
2182 spin_lock_irqsave(&nic->put_lock, flags);
2183 mac_control->rings[ring_no].put_pos =
2184 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2185 spin_unlock_irqrestore(&nic->put_lock, flags);
2188 if (rxdp->Host_Control == END_OF_BLOCK) {
2189 mac_control->rings[ring_no].rx_curr_put_info.
2191 mac_control->rings[ring_no].rx_curr_put_info.block_index
2192 %= mac_control->rings[ring_no].block_count;
2193 block_no = mac_control->rings[ring_no].rx_curr_put_info
2196 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2197 dev->name, block_no,
2198 (unsigned long long) rxdp->Control_1);
2199 mac_control->rings[ring_no].rx_curr_put_info.offset =
2201 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2204 #ifndef CONFIG_S2IO_NAPI
2205 spin_lock_irqsave(&nic->put_lock, flags);
2206 mac_control->rings[ring_no].put_pos = (block_no *
2207 (MAX_RXDS_PER_BLOCK + 1)) + off;
2208 spin_unlock_irqrestore(&nic->put_lock, flags);
2212 #ifndef CONFIG_2BUFF_MODE
/* descriptor still owned by the adapter: nothing to refill yet */
2213 if (rxdp->Control_1 & RXD_OWN_XENA)
2215 if (rxdp->Control_2 & BIT(0))
2218 mac_control->rings[ring_no].rx_curr_put_info.
2222 #ifdef CONFIG_2BUFF_MODE
2224 * RxDs Spanning cache lines will be replenished only
2225 * if the succeeding RxD is also owned by Host. It
2226 * will always be the ((8*i)+3) and ((8*i)+6)
2227 * descriptors for the 48 byte descriptor. The offending
2228 * descriptor is of course the 3rd descriptor.
2230 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2231 block_dma_addr + (off * sizeof(RxD_t));
2232 if (((u64) (rxdpphys)) % 128 > 80) {
2233 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2234 block_virt_addr + (off + 1);
2235 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2236 nextblk = (block_no + 1) %
2237 (mac_control->rings[ring_no].block_count);
2238 rxdpnext = mac_control->rings[ring_no].rx_blocks
2239 [nextblk].block_virt_addr;
2241 if (rxdpnext->Control_2 & BIT(0))
2246 #ifndef CONFIG_2BUFF_MODE
2247 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2249 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2252 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2253 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
/* release the batch accumulated so far before bailing out */
2256 first_rxdp->Control_1 |= RXD_OWN_XENA;
2260 #ifndef CONFIG_2BUFF_MODE
2261 skb_reserve(skb, NET_IP_ALIGN);
2262 memset(rxdp, 0, sizeof(RxD_t));
2263 rxdp->Buffer0_ptr = pci_map_single
2264 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2265 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2266 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2267 rxdp->Host_Control = (unsigned long) (skb);
2268 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2269 rxdp->Control_1 |= RXD_OWN_XENA;
2271 off %= (MAX_RXDS_PER_BLOCK + 1);
2272 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2274 ba = &mac_control->rings[ring_no].ba[block_no][off];
2275 skb_reserve(skb, BUF0_LEN);
2276 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2278 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2280 memset(rxdp, 0, sizeof(RxD_t));
2281 rxdp->Buffer2_ptr = pci_map_single
2282 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2283 PCI_DMA_FROMDEVICE);
2285 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2286 PCI_DMA_FROMDEVICE);
2288 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2289 PCI_DMA_FROMDEVICE);
2291 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2292 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2293 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2294 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2295 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2296 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2297 rxdp->Control_1 |= RXD_OWN_XENA;
2299 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2301 rxdp->Control_2 |= SET_RXD_MARKER;
2303 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2306 first_rxdp->Control_1 |= RXD_OWN_XENA;
2310 atomic_inc(&nic->rx_bufs_left[ring_no]);
2315 /* Transfer ownership of first descriptor to adapter just before
2316 * exiting. Before that, use memory barrier so that ownership
2317 * and other fields are seen by adapter correctly.
2321 first_rxdp->Control_1 |= RXD_OWN_XENA;
2328 * free_rx_buffers - Frees all Rx buffers
2329 * @sp: device private variable.
2331 * This function will free all Rx buffers allocated by host.
/*
 * free_rx_buffers - free every host-allocated Rx buffer: for each ring
 * it walks all RxDs (wrapping across blocks at END_OF_BLOCK), skips
 * descriptors still owned by the adapter, unmaps the DMA buffer(s)
 * (one mapping in single-buffer mode, three in CONFIG_2BUFF_MODE),
 * zeroes the descriptor, then resets the ring's put/get indices and
 * the rx_bufs_left counter.
 * NOTE(review): extraction dropped lines (declarations of rxdp/ba,
 * blk increment on block wrap, dev_kfree_skb / buf_cnt accounting) --
 * verify against pristine source.
 */
2336 static void free_rx_buffers(struct s2io_nic *sp)
2338 struct net_device *dev = sp->dev;
2339 int i, j, blk = 0, off, buf_cnt = 0;
2341 struct sk_buff *skb;
2342 mac_info_t *mac_control;
2343 struct config_param *config;
2344 #ifdef CONFIG_2BUFF_MODE
2348 mac_control = &sp->mac_control;
2349 config = &sp->config;
2351 for (i = 0; i < config->rx_ring_num; i++) {
2352 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2353 off = j % (MAX_RXDS_PER_BLOCK + 1);
2354 rxdp = mac_control->rings[i].rx_blocks[blk].
2355 block_virt_addr + off;
2357 #ifndef CONFIG_2BUFF_MODE
2358 if (rxdp->Control_1 == END_OF_BLOCK) {
2360 (RxD_t *) ((unsigned long) rxdp->
2366 if (rxdp->Host_Control == END_OF_BLOCK) {
/* adapter-owned descriptor: nothing mapped by the host to release */
2372 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2373 memset(rxdp, 0, sizeof(RxD_t));
2378 (struct sk_buff *) ((unsigned long) rxdp->
2381 #ifndef CONFIG_2BUFF_MODE
2382 pci_unmap_single(sp->pdev, (dma_addr_t)
2385 HEADER_ETHERNET_II_802_3_SIZE
2386 + HEADER_802_2_SIZE +
2388 PCI_DMA_FROMDEVICE);
2390 ba = &mac_control->rings[i].ba[blk][off];
2391 pci_unmap_single(sp->pdev, (dma_addr_t)
2394 PCI_DMA_FROMDEVICE);
2395 pci_unmap_single(sp->pdev, (dma_addr_t)
2398 PCI_DMA_FROMDEVICE);
2399 pci_unmap_single(sp->pdev, (dma_addr_t)
2401 dev->mtu + BUF0_LEN + 4,
2402 PCI_DMA_FROMDEVICE);
2405 atomic_dec(&sp->rx_bufs_left[i]);
2408 memset(rxdp, 0, sizeof(RxD_t));
2410 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2411 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2412 mac_control->rings[i].rx_curr_put_info.offset = 0;
2413 mac_control->rings[i].rx_curr_get_info.offset = 0;
2414 atomic_set(&sp->rx_bufs_left[i], 0);
2415 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2416 dev->name, buf_cnt, i);
2421 * s2io_poll - Rx interrupt handler for NAPI support
2422 * @dev : pointer to the device structure.
2423 * @budget : The number of packets that were budgeted to be processed
2424 * during one pass through the 'Poll" function.
2426 * Comes into picture only if NAPI support has been incorporated. It does
2427 * the same thing that rx_intr_handler does, but not in an interrupt context.
2428 * Also, it will process only a given number of packets.
2430 * 0 on success and 1 if there are No Rx packets to be processed.
2433 #if defined(CONFIG_S2IO_NAPI)
2434 static int s2io_poll(struct net_device *dev, int *budget)
/* NAPI poll callback: processes up to *budget Rx packets across all rings,
 * acks the Rx traffic interrupt, replenishes Rx buffers, and re-enables Rx
 * interrupts once the quota was not exhausted (old *budget/quota NAPI API). */
2436 nic_t *nic = dev->priv;
2437 int pkt_cnt = 0, org_pkts_to_process;
2438 mac_info_t *mac_control;
2439 struct config_param *config;
2440 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2444 atomic_inc(&nic->isr_cnt);
2445 mac_control = &nic->mac_control;
2446 config = &nic->config;
/* Cap the per-call work at the smaller of *budget and dev->quota. */
2448 nic->pkts_to_process = *budget;
2449 if (nic->pkts_to_process > dev->quota)
2450 nic->pkts_to_process = dev->quota;
2451 org_pkts_to_process = nic->pkts_to_process;
/* Write back the read value to acknowledge/clear pending Rx interrupts. */
2453 val64 = readq(&bar0->rx_traffic_int);
2454 writeq(val64, &bar0->rx_traffic_int);
2456 for (i = 0; i < config->rx_ring_num; i++) {
2457 rx_intr_handler(&mac_control->rings[i]);
/* rx_intr_handler decrements pkts_to_process; the difference from the
 * original value is how many packets were consumed so far. */
2458 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2459 if (!nic->pkts_to_process) {
2460 /* Quota for the current iteration has been met */
2467 dev->quota -= pkt_cnt;
2469 netif_rx_complete(dev);
/* Refill the Rx rings before leaving polled mode. */
2471 for (i = 0; i < config->rx_ring_num; i++) {
2472 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2473 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2474 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2478 /* Re enable the Rx interrupts. */
2479 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2480 atomic_dec(&nic->isr_cnt);
/* Quota exhausted: stay in polled mode (interrupts remain disabled). */
2484 dev->quota -= pkt_cnt;
2487 for (i = 0; i < config->rx_ring_num; i++) {
2488 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2489 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2490 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2494 atomic_dec(&nic->isr_cnt);
2500 * rx_intr_handler - Rx interrupt handler
2501 * @nic: device private variable.
2503 * If the interrupt is because of a received frame or if the
2504 * receive ring contains fresh as yet un-processed frames,this function is
2505 * called. It picks out the RxD at which place the last Rx processing had
2506 * stopped and sends the skb to the OSM's Rx handler and then increments
2511 static void rx_intr_handler(ring_info_t *ring_data)
/* Drains one Rx ring: walks descriptors from the last "get" position up to
 * (but not including) the "put" position, unmaps each buffer and hands the
 * skb to rx_osm_handler().  Stops early when the NAPI quota or
 * indicate_max_pkts is reached.  Runs under nic->rx_lock. */
2513 nic_t *nic = ring_data->nic;
2514 struct net_device *dev = (struct net_device *) nic->dev;
2515 int get_block, get_offset, put_block, put_offset, ring_bufs;
2516 rx_curr_get_info_t get_info, put_info;
2518 struct sk_buff *skb;
2519 #ifndef CONFIG_S2IO_NAPI
2522 spin_lock(&nic->rx_lock);
/* Bail out if the adapter is being reset/torn down. */
2523 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2524 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2525 __FUNCTION__, dev->name);
2526 spin_unlock(&nic->rx_lock);
2530 get_info = ring_data->rx_curr_get_info;
2531 get_block = get_info.block_index;
2532 put_info = ring_data->rx_curr_put_info;
2533 put_block = put_info.block_index;
2534 ring_bufs = get_info.ring_len+1;
2535 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* Flatten (block, offset) into a single linear ring index. */
2537 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2539 #ifndef CONFIG_S2IO_NAPI
/* Non-NAPI: producer position is maintained under put_lock by the
 * refill path, so snapshot it under that lock. */
2540 spin_lock(&nic->put_lock);
2541 put_offset = ring_data->put_pos;
2542 spin_unlock(&nic->put_lock);
2544 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Consume descriptors until we catch up with the producer or the
 * descriptor is no longer up to date. */
2547 while (RXD_IS_UP2DT(rxdp) &&
2548 (((get_offset + 1) % ring_bufs) != put_offset)) {
2549 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2551 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2553 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2554 spin_unlock(&nic->rx_lock);
2557 #ifndef CONFIG_2BUFF_MODE
2558 pci_unmap_single(nic->pdev, (dma_addr_t)
2561 HEADER_ETHERNET_II_802_3_SIZE +
2564 PCI_DMA_FROMDEVICE);
/* 2-buffer mode: unmap BUF0, BUF1 and the payload buffer. */
2566 pci_unmap_single(nic->pdev, (dma_addr_t)
2568 BUF0_LEN, PCI_DMA_FROMDEVICE);
2569 pci_unmap_single(nic->pdev, (dma_addr_t)
2571 BUF1_LEN, PCI_DMA_FROMDEVICE);
2572 pci_unmap_single(nic->pdev, (dma_addr_t)
2574 dev->mtu + BUF0_LEN + 4,
2575 PCI_DMA_FROMDEVICE);
2577 rx_osm_handler(ring_data, rxdp);
2579 ring_data->rx_curr_get_info.offset =
2581 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* Crossed a block boundary: wrap offset and advance to next block
 * (modulo the block count for ring wrap-around). */
2583 if (get_info.offset &&
2584 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2585 get_info.offset = 0;
2586 ring_data->rx_curr_get_info.offset
2589 get_block %= ring_data->block_count;
2590 ring_data->rx_curr_get_info.block_index
2592 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2595 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2597 #ifdef CONFIG_S2IO_NAPI
2598 nic->pkts_to_process -= 1;
2599 if (!nic->pkts_to_process)
2603 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2607 spin_unlock(&nic->rx_lock);
2611 * tx_intr_handler - Transmit interrupt handler
2612 * @nic : device private variable
2614 * If an interrupt was raised to indicate DMA complete of the
2615 * Tx packet, this function is called. It identifies the last TxD
2616 * whose buffer was freed and frees all skbs whose data have already
2617 * DMA'ed into the NICs internal memory.
2622 static void tx_intr_handler(fifo_info_t *fifo_data)
/* Reclaims completed Tx descriptors from one FIFO: for each TxDL no longer
 * owned by the NIC, logs any T_CODE error, unmaps the head fragment and all
 * page fragments, frees the skb, and advances the get pointer.  Finally
 * wakes the Tx queue if it was stopped. */
2624 nic_t *nic = fifo_data->nic;
2625 struct net_device *dev = (struct net_device *) nic->dev;
2626 tx_curr_get_info_t get_info, put_info;
2627 struct sk_buff *skb;
2631 get_info = fifo_data->tx_curr_get_info;
2632 put_info = fifo_data->tx_curr_put_info;
2633 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Process until we meet the put pointer, hit a descriptor still owned
 * by the NIC, or find one with no skb attached. */
2635 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2636 (get_info.offset != put_info.offset) &&
2637 (txdlp->Host_Control)) {
2638 /* Check for TxD errors */
2639 if (txdlp->Control_1 & TXD_T_CODE) {
2640 unsigned long long err;
2641 err = txdlp->Control_1 & TXD_T_CODE;
/* T_CODE 0xA means the frame was returned due to link loss;
 * anything else is reported as a TxD error. */
2642 if ((err >> 48) == 0xA) {
2643 DBG_PRINT(TX_DBG, "TxD returned due \
2644 to loss of link\n");
2647 DBG_PRINT(ERR_DBG, "***TxD error \
2652 skb = (struct sk_buff *) ((unsigned long)
2653 txdlp->Host_Control);
2655 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2657 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2661 frg_cnt = skb_shinfo(skb)->nr_frags;
2662 nic->tx_pkt_count++;
/* Unmap the linear (head) part of the skb ... */
2664 pci_unmap_single(nic->pdev, (dma_addr_t)
2665 txdlp->Buffer_Pointer,
2666 skb->len - skb->data_len,
/* ... then each paged fragment, one TxD per fragment. */
2672 for (j = 0; j < frg_cnt; j++, txdlp++) {
2674 &skb_shinfo(skb)->frags[j];
2675 if (!txdlp->Buffer_Pointer)
2677 pci_unmap_page(nic->pdev,
/* Wipe the whole TxDL so stale pointers are never reused. */
2687 (sizeof(TxD_t) * fifo_data->max_txds));
2689 /* Updating the statistics block */
2690 nic->stats.tx_bytes += skb->len;
2691 dev_kfree_skb_irq(skb);
2694 get_info.offset %= get_info.fifo_len + 1;
2695 txdlp = (TxD_t *) fifo_data->list_info
2696 [get_info.offset].list_virt_addr;
2697 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed; restart the queue if xmit had stopped it. */
2701 spin_lock(&nic->tx_lock);
2702 if (netif_queue_stopped(dev))
2703 netif_wake_queue(dev);
2704 spin_unlock(&nic->tx_lock);
2708 * alarm_intr_handler - Alarm Interrupt handler
2709 * @nic: device private variable
2710 * Description: If the interrupt was neither because of Rx packet or Tx
2711 * complete, this function is called. If the interrupt was to indicate
2712 * a loss of link, the OSM link status handler is invoked for any other
2713 * alarm interrupt the block that raised the interrupt is displayed
2714 * and a H/W reset is issued.
2719 static void alarm_intr_handler(struct s2io_nic *nic)
/* Handles "alarm" (non-traffic) interrupts: link state change, memory
 * controller ECC errors, serious serr_source errors (triggers a reset),
 * and the PCC_FB_ECC double-bit errata workaround.  Each status register
 * read is written back to acknowledge the condition. */
2721 struct net_device *dev = (struct net_device *) nic->dev;
2722 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2723 register u64 val64 = 0, err_reg = 0;
2725 /* Handling link status change error Intr */
2726 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2727 err_reg = readq(&bar0->mac_rmac_err_reg);
2728 writeq(err_reg, &bar0->mac_rmac_err_reg);
2729 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
/* Link change is handled in process context via the work queue. */
2730 schedule_work(&nic->set_link_task);
2734 /* Handling Ecc errors */
2735 val64 = readq(&bar0->mc_err_reg);
2736 writeq(val64, &bar0->mc_err_reg);
2737 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2738 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2739 nic->mac_control.stats_info->sw_stat.
2741 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
2743 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
2744 if (nic->device_type != XFRAME_II_DEVICE) {
2745 /* Reset XframeI only if critical error */
2746 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2747 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2748 netif_stop_queue(dev);
2749 schedule_work(&nic->rst_timer_task);
2753 nic->mac_control.stats_info->sw_stat.
2758 /* In case of a serious error, the device will be Reset. */
2759 val64 = readq(&bar0->serr_source);
2760 if (val64 & SERR_SOURCE_ANY) {
2761 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2762 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
2763 (unsigned long long)val64);
2764 netif_stop_queue(dev);
2765 schedule_work(&nic->rst_timer_task);
2769 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2770 * Error occurs, the adapter will be recycled by disabling the
2771 * adapter enable bit and enabling it again after the device
2772 * becomes Quiescent.
2774 val64 = readq(&bar0->pcc_err_reg);
2775 writeq(val64, &bar0->pcc_err_reg);
2776 if (val64 & PCC_FB_ECC_DB_ERR) {
/* Errata workaround: drop ADAPTER_CNTL_EN; set_link_task will
 * re-enable the adapter once it has gone quiescent. */
2777 u64 ac = readq(&bar0->adapter_control);
2778 ac &= ~(ADAPTER_CNTL_EN);
2779 writeq(ac, &bar0->adapter_control);
2780 ac = readq(&bar0->adapter_control);
2781 schedule_work(&nic->set_link_task);
2784 /* Other type of interrupts are not being handled now, TODO */
2788 * wait_for_cmd_complete - waits for a command to complete.
2789 * @sp : private member of the device structure, which is a pointer to the
2790 * s2io_nic structure.
2791 * Description: Function that waits for a command to Write into RMAC
2792 * ADDR DATA registers to be completed and returns either success or
2793 * error depending on whether the command was complete or not.
2795 * SUCCESS on success and FAILURE on failure.
2798 int wait_for_cmd_complete(nic_t * sp)
/* Polls rmac_addr_cmd_mem until the STROBE "executing" bit clears.
 * Returns SUCCESS once the command completes, FAILURE otherwise
 * (the polling loop tail is elided from this listing). */
2800 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2801 int ret = FAILURE, cnt = 0;
2805 val64 = readq(&bar0->rmac_addr_cmd_mem);
2806 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2819 * s2io_reset - Resets the card.
2820 * @sp : private member of the device structure.
2821 * Description: Function to Reset the card. This function then also
2822 * restores the previously saved PCI configuration space registers as
2823 * the card reset also resets the configuration space.
2828 void s2io_reset(nic_t * sp)
/* Performs a software reset of the adapter (SW_RESET_ALL), then restores
 * PCI config space, the swapper control, and the MSI-X table; clears
 * post-reset PCI/PCI-X error status, device statistics, and (per errata
 * SXE-002) the link/activity LED on affected Xframe-I subsystems. */
2830 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2834 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
2835 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2837 val64 = SW_RESET_ALL;
2838 writeq(val64, &bar0->sw_reset);
2841 * At this stage, if the PCI write is indeed completed, the
2842 * card is reset and so is the PCI Config space of the device.
2843 * So a read cannot be issued at this stage on any of the
2844 * registers to ensure the write into "sw_reset" register
2846 * Question: Is there any system call that will explicitly force
2847 * all the write commands still pending on the bus to be pushed
2849 * As of now I am just giving a 250ms delay and hoping that the
2850 * PCI write to sw_reset register is done by this time.
2854 /* Restore the PCI state saved during initialization. */
2855 pci_restore_state(sp->pdev);
2856 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2862 /* Set swapper to enable I/O register access */
2863 s2io_set_swapper(sp);
2865 /* Restore the MSIX table entries from local variables */
2866 restore_xmsi_data(sp);
2868 /* Clear certain PCI/PCI-X fields after reset */
2869 if (sp->device_type == XFRAME_II_DEVICE) {
2870 /* Clear parity err detect bit */
2871 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2873 /* Clearing PCIX Ecc status register */
2874 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2876 /* Clearing PCI_STATUS error reflected here */
2877 writeq(BIT(62), &bar0->txpic_int_reg);
2880 /* Reset device statistics maintained by OS */
2881 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2883 /* SXE-002: Configure link and activity LED to turn it off */
2884 subid = sp->pdev->subsystem_device;
2885 if (((subid & 0xFF) >= 0x07) &&
2886 (sp->device_type == XFRAME_I_DEVICE)) {
2887 val64 = readq(&bar0->gpio_control);
2888 val64 |= 0x0000800000000000ULL;
2889 writeq(val64, &bar0->gpio_control);
2890 val64 = 0x0411040400000000ULL;
/* 0x2700 is an undocumented register offset used by the SXE-002
 * LED workaround — NOTE(review): confirm against errata sheet. */
2891 writeq(val64, (void __iomem *)bar0 + 0x2700);
2895 * Clear spurious ECC interrupts that would have occurred on
2896 * XFRAME II cards after reset.
2898 if (sp->device_type == XFRAME_II_DEVICE) {
2899 val64 = readq(&bar0->pcc_err_reg);
2900 writeq(val64, &bar0->pcc_err_reg);
2903 sp->device_enabled_once = FALSE;
2907 * s2io_set_swapper - to set the swapper control on the card
2908 * @sp : private member of the device structure,
2909 * pointer to the s2io_nic structure.
2910 * Description: Function to set the swapper control on the card
2911 * correctly depending on the 'endianness' of the system.
2913 * SUCCESS on success and FAILURE on failure.
2916 int s2io_set_swapper(nic_t * sp)
/* Programs the swapper control register so that 64-bit register accesses
 * are correctly byte-swapped for the host's endianness, verifying via the
 * PIF feed-back and xmsi_address read-back registers.  Returns SUCCESS or
 * FAILURE.
 * FIX(review): the big-endian branch used "nic->intr_type", but no "nic"
 * exists in this function's scope — the parameter is "sp" (the parallel
 * branch below correctly uses sp->intr_type).  Changed to sp->intr_type. */
2918 struct net_device *dev = sp->dev;
2919 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2920 u64 val64, valt, valr;
2923 * Set proper endian settings and verify the same by reading
2924 * the PIF Feed-back register.
2927 val64 = readq(&bar0->pif_rd_swapper_fb);
2928 if (val64 != 0x0123456789ABCDEFULL) {
/* Try the candidate swapper settings in turn until the feed-back
 * register reads back the expected signature. */
2930 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2931 0x8100008181000081ULL, /* FE=1, SE=0 */
2932 0x4200004242000042ULL, /* FE=0, SE=1 */
2933 0}; /* FE=0, SE=0 */
2936 writeq(value[i], &bar0->swapper_ctrl);
2937 val64 = readq(&bar0->pif_rd_swapper_fb);
2938 if (val64 == 0x0123456789ABCDEFULL)
2943 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2945 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2946 (unsigned long long) val64);
2951 valr = readq(&bar0->swapper_ctrl);
/* Second verification path: write a known pattern to xmsi_address
 * and check it reads back unchanged. */
2954 valt = 0x0123456789ABCDEFULL;
2955 writeq(valt, &bar0->xmsi_address);
2956 val64 = readq(&bar0->xmsi_address);
2960 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2961 0x0081810000818100ULL, /* FE=1, SE=0 */
2962 0x0042420000424200ULL, /* FE=0, SE=1 */
2963 0}; /* FE=0, SE=0 */
2966 writeq((value[i] | valr), &bar0->swapper_ctrl);
2967 writeq(valt, &bar0->xmsi_address);
2968 val64 = readq(&bar0->xmsi_address);
2974 unsigned long long x = val64;
2975 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2976 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2980 val64 = readq(&bar0->swapper_ctrl);
2981 val64 &= 0xFFFF000000000000ULL;
2985 * The device by default set to a big endian format, so a
2986 * big endian driver need not set anything.
2988 val64 |= (SWAPPER_CTRL_TXP_FE |
2989 SWAPPER_CTRL_TXP_SE |
2990 SWAPPER_CTRL_TXD_R_FE |
2991 SWAPPER_CTRL_TXD_W_FE |
2992 SWAPPER_CTRL_TXF_R_FE |
2993 SWAPPER_CTRL_RXD_R_FE |
2994 SWAPPER_CTRL_RXD_W_FE |
2995 SWAPPER_CTRL_RXF_W_FE |
2996 SWAPPER_CTRL_XMSI_FE |
2997 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2998 if (sp->intr_type == INTA)
2999 val64 |= SWAPPER_CTRL_XMSI_SE;
3000 writeq(val64, &bar0->swapper_ctrl);
3003 * Initially we enable all bits to make it accessible by the
3004 * driver, then we selectively enable only those bits that
3007 val64 |= (SWAPPER_CTRL_TXP_FE |
3008 SWAPPER_CTRL_TXP_SE |
3009 SWAPPER_CTRL_TXD_R_FE |
3010 SWAPPER_CTRL_TXD_R_SE |
3011 SWAPPER_CTRL_TXD_W_FE |
3012 SWAPPER_CTRL_TXD_W_SE |
3013 SWAPPER_CTRL_TXF_R_FE |
3014 SWAPPER_CTRL_RXD_R_FE |
3015 SWAPPER_CTRL_RXD_R_SE |
3016 SWAPPER_CTRL_RXD_W_FE |
3017 SWAPPER_CTRL_RXD_W_SE |
3018 SWAPPER_CTRL_RXF_W_FE |
3019 SWAPPER_CTRL_XMSI_FE |
3020 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3021 if (sp->intr_type == INTA)
3022 val64 |= SWAPPER_CTRL_XMSI_SE;
3023 writeq(val64, &bar0->swapper_ctrl);
3025 val64 = readq(&bar0->swapper_ctrl);
3028 * Verifying if endian settings are accurate by reading a
3029 * feedback register.
3031 val64 = readq(&bar0->pif_rd_swapper_fb);
3032 if (val64 != 0x0123456789ABCDEFULL) {
3033 /* Endian settings are incorrect, calls for another dekko. */
3034 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3036 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3037 (unsigned long long) val64);
3044 int wait_for_msix_trans(nic_t *nic, int i)
/* Polls xmsi_access until its busy bit (BIT(15)) clears, indicating the
 * MSI-X table transaction for entry @i has completed.  Returns 0 on
 * success, non-zero (with an error print) on timeout. */
3046 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3048 int ret = 0, cnt = 0;
3051 val64 = readq(&bar0->xmsi_access);
3052 if (!(val64 & BIT(15)))
3058 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3065 void restore_xmsi_data(nic_t *nic)
/* Writes the MSI-X address/data pairs previously saved in nic->msix_info
 * back into the device's MSI-X table (entry selected via xmsi_access).
 * Counterpart of store_xmsi_data(); used after a device reset. */
3067 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3071 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3072 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3073 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
/* BIT(7) = write command, BIT(15) = strobe, vBIT(i,26,6) = entry no. */
3074 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3075 writeq(val64, &bar0->xmsi_access);
3076 if (wait_for_msix_trans(nic, i)) {
3077 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3083 void store_xmsi_data(nic_t *nic)
/* Reads every MSI-X table entry from the device and caches the
 * address/data pairs in nic->msix_info so restore_xmsi_data() can
 * reprogram them after a reset. */
3085 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3086 u64 val64, addr, data;
3089 /* Store and display */
3090 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
/* BIT(15) = strobe (read command), vBIT(i,26,6) = entry number. */
3091 val64 = (BIT(15) | vBIT(i, 26, 6));
3092 writeq(val64, &bar0->xmsi_access);
3093 if (wait_for_msix_trans(nic, i)) {
3094 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3097 addr = readq(&bar0->xmsi_address);
3098 data = readq(&bar0->xmsi_data);
3100 nic->msix_info[i].addr = addr;
3101 nic->msix_info[i].data = data;
3106 int s2io_enable_msi(nic_t *nic)
/* Enables plain MSI for the adapter: calls pci_enable_msi(), switches the
 * device to use MSI-1 instead of MSI-0 via PCI config space, and routes
 * every Tx FIFO and Rx ring to MSI-1 through the tx_mat/rx_mat registers. */
3108 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3109 u16 msi_ctrl, msg_val;
3110 struct config_param *config = &nic->config;
3111 struct net_device *dev = nic->dev;
3112 u64 val64, tx_mat, rx_mat;
3115 val64 = readq(&bar0->pic_control);
3117 writeq(val64, &bar0->pic_control);
3119 err = pci_enable_msi(nic->pdev);
3121 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3127 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3128 * for interrupt handling.
/* NOTE(review): 0x4c/0x42 are raw config-space offsets into the MSI
 * capability of this device — confirm against the Xframe datasheet. */
3130 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3132 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3133 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3135 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3137 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3139 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3140 tx_mat = readq(&bar0->tx_mat0_n[0]);
3141 for (i=0; i<config->tx_fifo_num; i++) {
3142 tx_mat |= TX_MAT_SET(i, 1);
3144 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3146 rx_mat = readq(&bar0->rx_mat);
3147 for (i=0; i<config->rx_ring_num; i++) {
3148 rx_mat |= RX_MAT_SET(i, 1);
3150 writeq(rx_mat, &bar0->rx_mat);
3152 dev->irq = nic->pdev->irq;
3156 int s2io_enable_msi_x(nic_t *nic)
/* Allocates and initializes the MSI-X entry tables, maps each Tx FIFO and
 * Rx ring to its own MSI-X vector via the tx_mat/rx_mat registers, calls
 * pci_enable_msix(), and (herc-bug workaround) also sets the MSI enable
 * bit in config space.  Frees both tables and returns non-zero on failure.
 * FIX(review): in the bimodal branch the loop iterates j over the Rx
 * rings, but the original called TX_MAT_SET(i, msix_indx) with the stale
 * Tx-loop index i — every iteration programmed the same field.  Changed
 * to TX_MAT_SET(j, msix_indx), matching RX_MAT_SET(j, ...) above. */
3158 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
3160 u16 msi_control; /* Temp variable */
3161 int ret, i, j, msix_indx = 1;
3163 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3165 if (nic->entries == NULL) {
3166 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3169 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3172 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3174 if (nic->s2io_entries == NULL) {
3175 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3176 kfree(nic->entries);
3179 memset(nic->s2io_entries, 0,
3180 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3182 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3183 nic->entries[i].entry = i;
3184 nic->s2io_entries[i].entry = i;
3185 nic->s2io_entries[i].arg = NULL;
3186 nic->s2io_entries[i].in_use = 0;
/* Vector 0 is reserved; data vectors start at msix_indx = 1.
 * First map each Tx FIFO to its own vector. */
3189 tx_mat = readq(&bar0->tx_mat0_n[0]);
3190 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3191 tx_mat |= TX_MAT_SET(i, msix_indx);
3192 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3193 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3194 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3196 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3198 if (!nic->config.bimodal) {
3199 rx_mat = readq(&bar0->rx_mat);
3200 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3201 rx_mat |= RX_MAT_SET(j, msix_indx);
3202 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3203 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3204 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3206 writeq(rx_mat, &bar0->rx_mat);
/* Bimodal: Rx ring interrupts are steered through tx_mat0_n[7]. */
3208 tx_mat = readq(&bar0->tx_mat0_n[7]);
3209 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3210 tx_mat |= TX_MAT_SET(j, msix_indx);
3211 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3212 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3213 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3215 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3218 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3220 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3221 kfree(nic->entries);
3222 kfree(nic->s2io_entries);
3223 nic->entries = NULL;
3224 nic->s2io_entries = NULL;
3229 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3230 * in the herc NIC. (Temp change, needs to be removed later)
3232 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3233 msi_control |= 0x1; /* Enable MSI */
3234 pci_write_config_word(nic->pdev, 0x42, msi_control);
3239 /* ********************************************************* *
3240 * Functions defined below concern the OS part of the driver *
3241 * ********************************************************* */
3244 * s2io_open - open entry point of the driver
3245 * @dev : pointer to the device structure.
3247 * This function is the open entry point of the driver. It mainly calls a
3248 * function to allocate Rx buffers and inserts them into the buffer
3249 * descriptors and then enables the Rx part of the NIC.
3251 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3255 int s2io_open(struct net_device *dev)
/* netdev open entry point: brings the hardware up (s2io_card_up), caches
 * the MSI-X table, registers the appropriate ISR(s) for the configured
 * interrupt type (INTA / MSI / MSI-X), sets the MAC address, and starts
 * the Tx queue.  On failure unwinds IRQs/MSI state via the labels below. */
3257 nic_t *sp = dev->priv;
3260 u16 msi_control; /* Temp variable */
3263 * Make sure you have link off by default every time
3264 * Nic is initialized
3266 netif_carrier_off(dev);
3267 sp->last_link_state = 0;
3269 /* Initialize H/W and enable interrupts */
3270 if (s2io_card_up(sp)) {
3271 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3274 goto hw_init_failed;
3277 /* Store the values of the MSIX table in the nic_t structure */
3278 store_xmsi_data(sp);
3280 /* After proper initialization of H/W, register ISR */
3281 if (sp->intr_type == MSI) {
3282 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
3283 SA_SHIRQ, sp->name, dev);
3285 DBG_PRINT(ERR_DBG, "%s: MSI registration \
3286 failed\n", dev->name);
3287 goto isr_registration_failed;
3290 if (sp->intr_type == MSI_X) {
/* One IRQ per in-use MSI-X entry; entry 0 is skipped (starts at 1).
 * FIFO entries get the Tx handler, everything else the Rx handler. */
3291 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3292 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3293 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3295 err = request_irq(sp->entries[i].vector,
3296 s2io_msix_fifo_handle, 0, sp->desc1,
3297 sp->s2io_entries[i].arg);
3298 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
3299 sp->msix_info[i].addr);
3301 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3303 err = request_irq(sp->entries[i].vector,
3304 s2io_msix_ring_handle, 0, sp->desc2,
3305 sp->s2io_entries[i].arg);
3306 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
3307 sp->msix_info[i].addr);
3310 DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
3311 failed\n", dev->name, i);
3312 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3313 goto isr_registration_failed;
3315 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3318 if (sp->intr_type == INTA) {
3319 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3322 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3324 goto isr_registration_failed;
3328 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3329 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3331 goto setting_mac_address_failed;
3334 netif_start_queue(dev);
/* ---- error unwind: fall through from most-complete to least ---- */
3337 setting_mac_address_failed:
3338 if (sp->intr_type != MSI_X)
3339 free_irq(sp->pdev->irq, dev);
3340 isr_registration_failed:
3341 del_timer_sync(&sp->alarm_timer);
3342 if (sp->intr_type == MSI_X) {
3343 if (sp->device_type == XFRAME_II_DEVICE) {
/* Free only the vectors that registered successfully. */
3344 for (i=1; (sp->s2io_entries[i].in_use ==
3345 MSIX_REGISTERED_SUCCESS); i++) {
3346 int vector = sp->entries[i].vector;
3347 void *arg = sp->s2io_entries[i].arg;
3349 free_irq(vector, arg);
3351 pci_disable_msix(sp->pdev);
3354 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3355 msi_control &= 0xFFFE; /* Disable MSI */
3356 pci_write_config_word(sp->pdev, 0x42, msi_control);
3359 else if (sp->intr_type == MSI)
3360 pci_disable_msi(sp->pdev);
3363 if (sp->intr_type == MSI_X) {
3366 if (sp->s2io_entries)
3367 kfree(sp->s2io_entries);
3373 * s2io_close -close entry point of the driver
3374 * @dev : device pointer.
3376 * This is the stop entry point of the driver. It needs to undo exactly
3377 * whatever was done by the open entry point,thus it's usually referred to
3378 * as the close function.Among other things this function mainly stops the
3379 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3381 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3385 int s2io_close(struct net_device *dev)
/* netdev stop entry point: flushes deferred work, stops the Tx queue,
 * frees all registered IRQs (per-vector for MSI-X), disables MSI/MSI-X,
 * and marks the device closed.  Mirrors what s2io_open() set up. */
3387 nic_t *sp = dev->priv;
3391 flush_scheduled_work();
3392 netif_stop_queue(dev);
3393 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3396 if (sp->intr_type == MSI_X) {
3397 if (sp->device_type == XFRAME_II_DEVICE) {
/* Free each vector that s2io_open() successfully registered. */
3398 for (i=1; (sp->s2io_entries[i].in_use ==
3399 MSIX_REGISTERED_SUCCESS); i++) {
3400 int vector = sp->entries[i].vector;
3401 void *arg = sp->s2io_entries[i].arg;
3403 free_irq(vector, arg);
/* Clear the MSI enable bit set as the herc workaround. */
3405 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3406 msi_control &= 0xFFFE; /* Disable MSI */
3407 pci_write_config_word(sp->pdev, 0x42, msi_control);
3409 pci_disable_msix(sp->pdev);
3413 free_irq(sp->pdev->irq, dev);
3414 if (sp->intr_type == MSI)
3415 pci_disable_msi(sp->pdev);
3417 sp->device_close_flag = TRUE; /* Device is shut down. */
3422 * s2io_xmit - Tx entry point of the driver
3423 * @skb : the socket buffer containing the Tx data.
3424 * @dev : device pointer.
3426 * This function is the Tx entry point of the driver. S2IO NIC supports
3427 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3428 * NOTE: when the device can't queue the pkt, just the trans_start variable will
3431 * 0 on success & 1 on failure.
3434 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
/* hard_start_xmit: selects a FIFO (by VLAN priority when tagged), builds
 * the TxDL (LSO/checksum/VLAN flags, head buffer + page fragments), hands
 * the list to the NIC via the TxFIFO doorbell, and advances the put
 * pointer.  Stops the queue when the FIFO is (nearly) full.  Runs under
 * sp->tx_lock with IRQs saved. */
3436 nic_t *sp = dev->priv;
3437 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3440 TxFIFO_element_t __iomem *tx_fifo;
3441 unsigned long flags;
3446 int vlan_priority = 0;
3447 mac_info_t *mac_control;
3448 struct config_param *config;
3450 mac_control = &sp->mac_control;
3451 config = &sp->config;
3453 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3454 spin_lock_irqsave(&sp->tx_lock, flags);
3455 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3456 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3458 spin_unlock_irqrestore(&sp->tx_lock, flags);
3465 /* Get Fifo number to Transmit based on vlan priority */
3466 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3467 vlan_tag = vlan_tx_tag_get(skb);
/* 802.1p priority is the top 3 bits of the VLAN tag. */
3468 vlan_priority = vlan_tag >> 13;
3469 queue = config->fifo_mapping[vlan_priority];
3472 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3473 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3474 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3477 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3478 /* Avoid "put" pointer going beyond "get" pointer */
3479 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3480 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3481 netif_stop_queue(dev);
3483 spin_unlock_irqrestore(&sp->tx_lock, flags);
3487 /* A buffer with no data will be dropped */
3489 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3491 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* TSO: pass the MSS to the NIC so it can segment in hardware. */
3496 mss = skb_shinfo(skb)->tso_size;
3498 txdp->Control_1 |= TXD_TCP_LSO_EN;
3499 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3503 frg_cnt = skb_shinfo(skb)->nr_frags;
3504 frg_len = skb->len - skb->data_len;
/* First TxD carries the linear part of the skb. */
3506 txdp->Buffer_Pointer = pci_map_single
3507 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3508 txdp->Host_Control = (unsigned long) skb;
3509 if (skb->ip_summed == CHECKSUM_HW) {
3511 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3515 txdp->Control_2 |= config->tx_intr_type;
3517 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3518 txdp->Control_2 |= TXD_VLAN_ENABLE;
3519 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3522 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3523 TXD_GATHER_CODE_FIRST);
3524 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3526 /* For fragmented SKB. */
3527 for (i = 0; i < frg_cnt; i++) {
3528 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3529 /* A '0' length fragment will be ignored */
3533 txdp->Buffer_Pointer = (u64) pci_map_page
3534 (sp->pdev, frag->page, frag->page_offset,
3535 frag->size, PCI_DMA_TODEVICE);
3536 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3538 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Ring the doorbell: write the TxDL physical address, then the
 * list-control word describing the list we just built. */
3540 tx_fifo = mac_control->tx_FIFO_start[queue];
3541 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3542 writeq(val64, &tx_fifo->TxDL_Pointer);
3544 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3549 val64 |= TX_FIFO_SPECIAL_FUNC;
3551 writeq(val64, &tx_fifo->List_Control);
3556 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3557 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3559 /* Avoid "put" pointer going beyond "get" pointer */
3560 if (((put_off + 1) % queue_len) == get_off) {
3562 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3564 netif_stop_queue(dev);
3567 dev->trans_start = jiffies;
3568 spin_unlock_irqrestore(&sp->tx_lock, flags);
3574 s2io_alarm_handle(unsigned long data)
/* Timer callback: runs the alarm interrupt handler and re-arms itself
 * to fire again in half a second. */
3576 nic_t *sp = (nic_t *)data;
3578 alarm_intr_handler(sp);
3579 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3583 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
/* MSI interrupt handler: services every Rx ring and Tx FIFO (a single MSI
 * covers all traffic), then replenishes Rx buffers — inline when a ring
 * hits PANIC level, via tasklet when merely LOW. */
3585 struct net_device *dev = (struct net_device *) dev_id;
3586 nic_t *sp = dev->priv;
3589 mac_info_t *mac_control;
3590 struct config_param *config;
3592 atomic_inc(&sp->isr_cnt);
3593 mac_control = &sp->mac_control;
3594 config = &sp->config;
3595 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
3597 /* If Intr is because of Rx Traffic */
3598 for (i = 0; i < config->rx_ring_num; i++)
3599 rx_intr_handler(&mac_control->rings[i]);
3601 /* If Intr is because of Tx Traffic */
3602 for (i = 0; i < config->tx_fifo_num; i++)
3603 tx_intr_handler(&mac_control->fifos[i]);
3606 * If the Rx buffer count is below the panic threshold then
3607 * reallocate the buffers from the interrupt handler itself,
3608 * else schedule a tasklet to reallocate the buffers.
3610 for (i = 0; i < config->rx_ring_num; i++) {
3611 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3612 int level = rx_buffer_level(sp, rxb_size, i);
3614 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3615 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3616 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3617 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3618 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3620 DBG_PRINT(ERR_DBG, " in ISR!!\n");
/* Release the TASKLET_IN_USE bit taken by the test above. */
3621 clear_bit(0, (&sp->tasklet_status));
3622 atomic_dec(&sp->isr_cnt);
3625 clear_bit(0, (&sp->tasklet_status));
3626 } else if (level == LOW) {
3627 tasklet_schedule(&sp->task);
3631 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_ring_handle - per-Rx-ring interrupt handler for MSI-X mode.
 * @dev_id: the ring_info_t registered for this vector (one vector per ring).
 *
 * Services exactly one Rx ring, then refills its buffers inline at PANIC
 * level or schedules the refill tasklet at LOW level.
 */
3636 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3638 ring_info_t *ring = (ring_info_t *)dev_id;
3639 nic_t *sp = ring->nic;
3640 int rxb_size, level, rng_n;
3642 atomic_inc(&sp->isr_cnt);
3643 rx_intr_handler(ring);
3645 rng_n = ring->ring_no;
3646 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3647 level = rx_buffer_level(sp, rxb_size, rng_n);
/* Inline refill only when critically low and the tasklet isn't already at it. */
3649 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3651 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3652 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3653 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3654 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3656 clear_bit(0, (&sp->tasklet_status));
3659 clear_bit(0, (&sp->tasklet_status));
3660 } else if (level == LOW) {
3661 tasklet_schedule(&sp->task);
3663 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_fifo_handle - per-Tx-FIFO interrupt handler for MSI-X mode.
 * @dev_id: the fifo_info_t registered for this vector (one vector per FIFO).
 *
 * Completes transmitted descriptors on a single Tx FIFO; no Rx work here.
 */
3669 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
3671 fifo_info_t *fifo = (fifo_info_t *)dev_id;
3672 nic_t *sp = fifo->nic;
3674 atomic_inc(&sp->isr_cnt);
3675 tx_intr_handler(fifo);
3676 atomic_dec(&sp->isr_cnt);
/*
 * s2io_txpic_intr_handle - handle TxPIC block interrupts (GPIO link events).
 * @sp: device private structure.
 *
 * Reads pic_int_status; on a GPIO interrupt it acknowledges link up/down
 * bits, and when the link state actually changed it masks both link
 * interrupts and kicks s2io_set_link(). Otherwise it re-programs the GPIO
 * mask so that only the *opposite* transition of the current link state can
 * interrupt next (link up => unmask DOWN, mask UP; and vice versa).
 * NOTE(review): some closing braces are elided in this excerpt.
 */
3680 static void s2io_txpic_intr_handle(nic_t *sp)
3682 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3685 val64 = readq(&bar0->pic_int_status);
3686 if (val64 & PIC_INT_GPIO) {
3687 val64 = readq(&bar0->gpio_int_reg);
3688 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3689 (val64 & GPIO_INT_REG_LINK_UP)) {
/* Both transitions latched at once: write-1-to-clear both and take no action. */
3690 val64 |= GPIO_INT_REG_LINK_DOWN;
3691 val64 |= GPIO_INT_REG_LINK_UP;
3692 writeq(val64, &bar0->gpio_int_reg);
/* A genuine state change: DOWN seen while we thought UP, or UP while DOWN. */
3696 if (((sp->last_link_state == LINK_UP) &&
3697 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3698 ((sp->last_link_state == LINK_DOWN) &&
3699 (val64 & GPIO_INT_REG_LINK_UP))) {
3700 val64 = readq(&bar0->gpio_int_mask);
/* Mask both link interrupts while s2io_set_link() processes the change. */
3701 val64 |= GPIO_INT_MASK_LINK_DOWN;
3702 val64 |= GPIO_INT_MASK_LINK_UP;
3703 writeq(val64, &bar0->gpio_int_mask);
3704 s2io_set_link((unsigned long)sp);
3707 if (sp->last_link_state == LINK_UP) {
3708 /*enable down interrupt */
3709 val64 = readq(&bar0->gpio_int_mask);
3710 /* unmasks link down intr */
3711 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3712 /* masks link up intr */
3713 val64 |= GPIO_INT_MASK_LINK_UP;
3714 writeq(val64, &bar0->gpio_int_mask);
3716 /*enable UP Interrupt */
3717 val64 = readq(&bar0->gpio_int_mask);
3718 /* unmasks link up interrupt */
3719 val64 &= ~GPIO_INT_MASK_LINK_UP;
3720 /* masks link down interrupt */
3721 val64 |= GPIO_INT_MASK_LINK_DOWN;
3722 writeq(val64, &bar0->gpio_int_mask);
3728 * s2io_isr - ISR handler of the device .
3729 * @irq: the irq of the device.
3730 * @dev_id: a void pointer to the dev structure of the NIC.
3731 * @pt_regs: pointer to the registers pushed on the stack.
3732 * Description: This function is the ISR handler of the device. It
3733 * identifies the reason for the interrupt and calls the relevant
3734 service routines. As a contingency measure, this ISR allocates the
3735 * recv buffers, if their numbers are below the panic value which is
3736 * presently set to 25% of the original number of rcv buffers allocated.
3738 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
3739 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * s2io_isr - legacy INTx interrupt handler (kernel-doc above).
 *
 * Reads general_int_status to find the interrupt cause, then dispatches to
 * the Rx/Tx/TxPIC service routines. Under NAPI, Rx work is deferred to the
 * poll routine after disabling Rx interrupts; otherwise Rx rings are drained
 * here and their buffers replenished (inline at PANIC level, via tasklet at
 * LOW level). NOTE(review): braces/returns elided in this excerpt.
 */
3741 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3743 struct net_device *dev = (struct net_device *) dev_id;
3744 nic_t *sp = dev->priv;
3745 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3747 u64 reason = 0, val64;
3748 mac_info_t *mac_control;
3749 struct config_param *config;
3751 atomic_inc(&sp->isr_cnt);
3752 mac_control = &sp->mac_control;
3753 config = &sp->config;
3756 * Identify the cause for interrupt and call the appropriate
3757 * interrupt handler. Causes for the interrupt could be;
3761 * 4. Error in any functional blocks of the NIC.
3763 reason = readq(&bar0->general_int_status);
3766 /* The interrupt was not raised by Xena. */
3767 atomic_dec(&sp->isr_cnt);
3771 #ifdef CONFIG_S2IO_NAPI
3772 if (reason & GEN_INTR_RXTRAFFIC) {
/* NAPI path: disable further Rx interrupts and hand work to the poll routine. */
3773 if (netif_rx_schedule_prep(dev)) {
3774 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3776 __netif_rx_schedule(dev);
3780 /* If Intr is because of Rx Traffic */
3781 if (reason & GEN_INTR_RXTRAFFIC) {
3783 * rx_traffic_int reg is an R1 register, writing all 1's
3784 * will ensure that the actual interrupt causing bit get's
3785 * cleared and hence a read can be avoided.
3787 val64 = 0xFFFFFFFFFFFFFFFFULL;
3788 writeq(val64, &bar0->rx_traffic_int);
3789 for (i = 0; i < config->rx_ring_num; i++) {
3790 rx_intr_handler(&mac_control->rings[i]);
3795 /* If Intr is because of Tx Traffic */
3796 if (reason & GEN_INTR_TXTRAFFIC) {
3798 * tx_traffic_int reg is an R1 register, writing all 1's
3799 * will ensure that the actual interrupt causing bit get's
3800 * cleared and hence a read can be avoided.
3802 val64 = 0xFFFFFFFFFFFFFFFFULL;
3803 writeq(val64, &bar0->tx_traffic_int);
3805 for (i = 0; i < config->tx_fifo_num; i++)
3806 tx_intr_handler(&mac_control->fifos[i]);
3809 if (reason & GEN_INTR_TXPIC)
3810 s2io_txpic_intr_handle(sp);
3812 * If the Rx buffer count is below the panic threshold then
3813 * reallocate the buffers from the interrupt handler itself,
3814 * else schedule a tasklet to reallocate the buffers.
/* Non-NAPI only: NAPI replenishes from the poll routine instead. */
3816 #ifndef CONFIG_S2IO_NAPI
3817 for (i = 0; i < config->rx_ring_num; i++) {
3819 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3820 int level = rx_buffer_level(sp, rxb_size, i);
3822 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3823 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3824 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3825 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3826 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3828 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3829 clear_bit(0, (&sp->tasklet_status));
3830 atomic_dec(&sp->isr_cnt);
3833 clear_bit(0, (&sp->tasklet_status));
3834 } else if (level == LOW) {
3835 tasklet_schedule(&sp->task);
3840 atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats - trigger an immediate one-shot hardware statistics DMA.
 * @sp: device private structure.
 *
 * Only acts when the card is UP: programs stat_cfg for a one-shot update,
 * then polls BIT(0) of stat_cfg until the hardware clears it (update done)
 * or a retry limit is hit, in which case the update is abandoned.
 */
3847 static void s2io_updt_stats(nic_t *sp)
3849 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3853 if (atomic_read(&sp->card_state) == CARD_UP) {
3854 /* Apprx 30us on a 133 MHz bus */
3855 val64 = SET_UPDT_CLICKS(10) |
3856 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3857 writeq(val64, &bar0->stat_cfg);
/* Poll for completion: BIT(0) clear means the stats block has been DMA'd. */
3860 val64 = readq(&bar0->stat_cfg);
3861 if (!(val64 & BIT(0)))
3865 break; /* Updt failed */
3871 * s2io_get_stats - Updates the device statistics structure.
3872 * @dev : pointer to the device structure.
3874 * This function updates the device statistics structure in the s2io_nic
3875 * structure and returns a pointer to the same.
3877 * pointer to the updated net_device_stats structure.
/*
 * s2io_get_stats - .get_stats entry point (kernel-doc above).
 *
 * Forces a fresh hardware statistics DMA via s2io_updt_stats(), copies the
 * relevant counters from the DMA'd stats block into sp->stats, and returns
 * a pointer to that structure.
 * NOTE(review): only the low 32 bits of each counter are folded in here; the
 * corresponding *_oflow high words are not — confirm whether that is intended.
 */
3880 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3882 nic_t *sp = dev->priv;
3883 mac_info_t *mac_control;
3884 struct config_param *config;
3887 mac_control = &sp->mac_control;
3888 config = &sp->config;
3890 /* Configure Stats for immediate updt */
3891 s2io_updt_stats(sp);
/* Hardware counters are little-endian in the stats block; convert on read. */
3893 sp->stats.tx_packets =
3894 le32_to_cpu(mac_control->stats_info->tmac_frms);
3895 sp->stats.tx_errors =
3896 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3897 sp->stats.rx_errors =
3898 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3899 sp->stats.multicast =
3900 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3901 sp->stats.rx_length_errors =
3902 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3904 return (&sp->stats);
3908 * s2io_set_multicast - entry point for multicast address enable/disable.
3909 * @dev : pointer to the device structure
3911 * This function is a driver entry point which gets called by the kernel
3912 * whenever multicast addresses must be enabled/disabled. This also gets
3913 * called to set/reset promiscuous mode. Depending on the device flag, we
3914 * determine, if multicast address must be enabled or if promiscuous mode
3915 * is to be disabled etc.
/*
 * s2io_set_multicast - .set_multicast_list entry point (kernel-doc above).
 *
 * Four concerns, in order: enable/disable the ALLMULTI catch-all filter,
 * enter/leave promiscuous mode, then rewrite the per-address multicast
 * filter list in the RMAC address CAM (clear old entries, program new ones).
 * Each CAM write is a data0/data1 pair followed by a strobed command and a
 * wait_for_cmd_complete(). NOTE(review): several braces/intervening lines
 * are elided in this excerpt.
 */
3920 static void s2io_set_multicast(struct net_device *dev)
3923 struct dev_mc_list *mclist;
3924 nic_t *sp = dev->priv;
3925 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3926 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3928 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
/* --- ALLMULTI on: program the catch-all multicast entry --- */
3931 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3932 /* Enable all Multicast addresses */
3933 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3934 &bar0->rmac_addr_data0_mem);
3935 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3936 &bar0->rmac_addr_data1_mem);
3937 val64 = RMAC_ADDR_CMD_MEM_WE |
3938 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3939 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3940 writeq(val64, &bar0->rmac_addr_cmd_mem);
3941 /* Wait till command completes */
3942 wait_for_cmd_complete(sp);
3945 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
/* --- ALLMULTI off: overwrite the catch-all entry with the disable address --- */
3946 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3947 /* Disable all Multicast addresses */
3948 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3949 &bar0->rmac_addr_data0_mem);
3950 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3951 &bar0->rmac_addr_data1_mem);
3952 val64 = RMAC_ADDR_CMD_MEM_WE |
3953 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3954 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3955 writeq(val64, &bar0->rmac_addr_cmd_mem);
3956 /* Wait till command completes */
3957 wait_for_cmd_complete(sp);
3960 sp->all_multi_pos = 0;
/* --- Promiscuous mode on --- */
3963 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3964 /* Put the NIC into promiscuous mode */
3965 add = &bar0->mac_cfg;
3966 val64 = readq(&bar0->mac_cfg);
3967 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: each 32-bit half write needs the 0x4C0D unlock key. */
3969 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3970 writel((u32) val64, add);
3971 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3972 writel((u32) (val64 >> 32), (add + 4));
3974 val64 = readq(&bar0->mac_cfg);
3975 sp->promisc_flg = 1;
3976 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
/* --- Promiscuous mode off --- */
3978 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3979 /* Remove the NIC from promiscuous mode */
3980 add = &bar0->mac_cfg;
3981 val64 = readq(&bar0->mac_cfg);
3982 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3984 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3985 writel((u32) val64, add);
3986 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3987 writel((u32) (val64 >> 32), (add + 4));
3989 val64 = readq(&bar0->mac_cfg);
3990 sp->promisc_flg = 0;
3991 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
3995 /* Update individual M_CAST address list */
3996 if ((!sp->m_cast_flg) && dev->mc_count) {
/* Too many addresses for the CAM: tell the admin to use ALLMULTI instead. */
3998 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3999 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4001 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4002 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4006 prev_cnt = sp->mc_addr_count;
4007 sp->mc_addr_count = dev->mc_count;
4009 /* Clear out the previous list of Mc in the H/W. */
4010 for (i = 0; i < prev_cnt; i++) {
4011 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4012 &bar0->rmac_addr_data0_mem);
4013 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4014 &bar0->rmac_addr_data1_mem);
4015 val64 = RMAC_ADDR_CMD_MEM_WE |
4016 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4017 RMAC_ADDR_CMD_MEM_OFFSET
4018 (MAC_MC_ADDR_START_OFFSET + i);
4019 writeq(val64, &bar0->rmac_addr_cmd_mem);
4021 /* Wait for command completes */
4022 if (wait_for_cmd_complete(sp)) {
4023 DBG_PRINT(ERR_DBG, "%s: Adding ",
4025 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4030 /* Create the new Rx filter list and update the same in H/W. */
4031 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4032 i++, mclist = mclist->next) {
4033 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into the low 48 bits of mac_addr for the CAM. */
4035 for (j = 0; j < ETH_ALEN; j++) {
4036 mac_addr |= mclist->dmi_addr[j];
4040 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4041 &bar0->rmac_addr_data0_mem);
4042 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4043 &bar0->rmac_addr_data1_mem);
4044 val64 = RMAC_ADDR_CMD_MEM_WE |
4045 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4046 RMAC_ADDR_CMD_MEM_OFFSET
4047 (i + MAC_MC_ADDR_START_OFFSET);
4048 writeq(val64, &bar0->rmac_addr_cmd_mem);
4050 /* Wait for command completes */
4051 if (wait_for_cmd_complete(sp)) {
4052 DBG_PRINT(ERR_DBG, "%s: Adding ",
4054 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4062 * s2io_set_mac_addr - Programs the Xframe mac address
4063 * @dev : pointer to the device structure.
4064 * @addr: a uchar pointer to the new mac address which is to be set.
4065 * Description : This procedure will program the Xframe to receive
4066 * frames with new Mac Address
4067 * Return value: SUCCESS on success and an appropriate (-)ve integer
4068 * as defined in errno.h file on failure.
/*
 * s2io_set_mac_addr - program a new unicast MAC address (kernel-doc above).
 *
 * Packs the 6 address bytes into a 48-bit value, writes it to CAM offset 0
 * (the unicast filter slot), strobes the command, and waits for completion.
 */
4071 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4073 nic_t *sp = dev->priv;
4074 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4075 register u64 val64, mac_addr = 0;
4079 * Set the new MAC address as the new unicast filter and reflect this
4080 * change on the device address registered with the OS. It will be
/* Fold the byte array into the low 48 bits, most-significant byte first. */
4083 for (i = 0; i < ETH_ALEN; i++) {
4085 mac_addr |= addr[i];
4088 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4089 &bar0->rmac_addr_data0_mem);
/* Offset 0 in the RMAC address CAM holds the primary unicast address. */
4092 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4093 RMAC_ADDR_CMD_MEM_OFFSET(0);
4094 writeq(val64, &bar0->rmac_addr_cmd_mem);
4095 /* Wait till command completes */
4096 if (wait_for_cmd_complete(sp)) {
4097 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4105 * s2io_ethtool_sset - Sets different link parameters.
4106 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4107 * @info: pointer to the structure with parameters given by ethtool to set
4110 * The function sets different link parameters provided by the user onto
/*
 * s2io_ethtool_sset - ethtool .set_settings (kernel-doc above).
 *
 * The Xframe link is fixed at 10G full duplex with no autonegotiation, so
 * any other requested combination is rejected; an acceptable request
 * restarts the interface (close visible here; reopen presumably follows in
 * lines elided from this excerpt — TODO confirm).
 */
4116 static int s2io_ethtool_sset(struct net_device *dev,
4117 struct ethtool_cmd *info)
4119 nic_t *sp = dev->priv;
4120 if ((info->autoneg == AUTONEG_ENABLE) ||
4121 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4124 s2io_close(sp->dev);
4132 * s2io_ethtool_gset - Return link specific information.
4133 * @sp : private member of the device structure, pointer to the
4134 * s2io_nic structure.
4135 * @info : pointer to the structure with parameters given by ethtool
4136 * to return link information.
4138 * Returns link specific information like speed, duplex etc.. to ethtool.
4140 * return 0 on success.
4143 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4145 nic_t *sp = dev->priv;
4146 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4147 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4148 info->port = PORT_FIBRE;
4149 /* info->transceiver?? TODO */
4151 if (netif_carrier_ok(sp->dev)) {
4152 info->speed = 10000;
4153 info->duplex = DUPLEX_FULL;
4159 info->autoneg = AUTONEG_DISABLE;
4164 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4165 * @sp : private member of the device structure, which is a pointer to the
4166 * s2io_nic structure.
4167 * @info : pointer to the structure with parameters given by ethtool to
4168 * return driver information.
4170 * Returns driver specific information like name, version etc.. to ethtool.
/*
 * s2io_ethtool_gdrvinfo - ethtool .get_drvinfo (kernel-doc above).
 *
 * Copies driver name/version, firmware string (empty) and PCI bus id into
 * the ethtool_drvinfo structure, plus the regdump/eeprom/test/stats sizes.
 * NOTE(review): strncpy does not guarantee NUL-termination if a source
 * string exactly fills the destination — confirm the name/version strings
 * are shorter than the ethtool_drvinfo fields.
 */
4175 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4176 struct ethtool_drvinfo *info)
4178 nic_t *sp = dev->priv;
4180 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4181 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4182 strncpy(info->fw_version, "", sizeof(info->fw_version));
4183 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4184 info->regdump_len = XENA_REG_SPACE;
4185 info->eedump_len = XENA_EEPROM_SPACE;
4186 info->testinfo_len = S2IO_TEST_LEN;
4187 info->n_stats = S2IO_STAT_LEN;
4191 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
4192 * @sp: private member of the device structure, which is a pointer to the
4193 * s2io_nic structure.
4194 * @regs : pointer to the structure with parameters given by ethtool for
4195 * dumping the registers.
4196 * @reg_space: The input argumnet into which all the registers are dumped.
4198 * Dumps the entire register space of xFrame NIC into the user given
4204 static void s2io_ethtool_gregs(struct net_device *dev,
4205 struct ethtool_regs *regs, void *space)
4209 u8 *reg_space = (u8 *) space;
4210 nic_t *sp = dev->priv;
4212 regs->len = XENA_REG_SPACE;
4213 regs->version = sp->pdev->subsystem_device;
4215 for (i = 0; i < regs->len; i += 8) {
4216 reg = readq(sp->bar0 + i);
4217 memcpy((reg_space + i), ®, 8);
4222 * s2io_phy_id - timer function that alternates adapter LED.
4223 * @data : address of the private member of the device structure, which
4224 * is a pointer to the s2io_nic structure, provided as an u32.
4225 * Description: This is actually the timer function that alternates the
4226 * adapter LED bit of the adapter control bit to set/reset every time on
4227 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4228 * once every second.
/*
 * s2io_phy_id - LED-blink timer callback (kernel-doc above).
 *
 * Toggles the identification LED each invocation: via the GPIO_0 control
 * bit on Xframe-II (or subsystem ids >= 0x07), otherwise via the
 * ADAPTER_LED_ON bit in adapter_control.  Re-arms itself every HZ/2 so the
 * LED blinks at ~1 Hz.
 */
4230 static void s2io_phy_id(unsigned long data)
4232 nic_t *sp = (nic_t *) data;
4233 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4237 subid = sp->pdev->subsystem_device;
4238 if ((sp->device_type == XFRAME_II_DEVICE) ||
4239 ((subid & 0xFF) >= 0x07)) {
/* Newer boards drive the LED from a GPIO line. */
4240 val64 = readq(&bar0->gpio_control);
4241 val64 ^= GPIO_CTRL_GPIO_0;
4242 writeq(val64, &bar0->gpio_control);
/* Older boards toggle the LED bit in adapter_control instead. */
4244 val64 = readq(&bar0->adapter_control);
4245 val64 ^= ADAPTER_LED_ON;
4246 writeq(val64, &bar0->adapter_control);
4249 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4253 * s2io_ethtool_idnic - To physically identify the nic on the system.
4254 * @sp : private member of the device structure, which is a pointer to the
4255 * s2io_nic structure.
4256 * @id : pointer to the structure with identification parameters given by
4258 * Description: Used to physically identify the NIC on the system.
4259 * The Link LED will blink for a time specified by the user for
4261 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4262 * identification is possible only if it's link is up.
4264 * int , returns 0 on success
/*
 * s2io_ethtool_idnic - ethtool .phys_id handler (kernel-doc above).
 * @data: number of seconds to blink; 0 presumably means "default duration"
 *        (MAX_FLICKER_TIME path) — TODO confirm against elided lines.
 *
 * Saves gpio_control, lazily initializes the blink timer driving
 * s2io_phy_id(), blinks for the requested time, then stops the timer and
 * restores gpio_control on boards with faulty link indicators.
 */
4267 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4269 u64 val64 = 0, last_gpio_ctrl_val;
4270 nic_t *sp = dev->priv;
4271 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4274 subid = sp->pdev->subsystem_device;
/* Remember the LED state so it can be restored after the blink. */
4275 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4276 if ((sp->device_type == XFRAME_I_DEVICE) &&
4277 ((subid & 0xFF) < 0x07)) {
/* Old Xframe-I boards can only blink while the adapter is enabled. */
4278 val64 = readq(&bar0->adapter_control);
4279 if (!(val64 & ADAPTER_CNTL_EN)) {
4281 "Adapter Link down, cannot blink LED\n");
/* First use: set up the timer that does the actual toggling. */
4285 if (sp->id_timer.function == NULL) {
4286 init_timer(&sp->id_timer);
4287 sp->id_timer.function = s2io_phy_id;
4288 sp->id_timer.data = (unsigned long) sp;
4290 mod_timer(&sp->id_timer, jiffies);
/* Sleep for the user-requested duration (or the default) while it blinks. */
4292 msleep_interruptible(data * HZ);
4294 msleep_interruptible(MAX_FLICKER_TIME);
4295 del_timer_sync(&sp->id_timer);
4297 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4298 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4299 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4306 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4307 * @sp : private member of the device structure, which is a pointer to the
4308 * s2io_nic structure.
4309 * @ep : pointer to the structure with pause parameters given by ethtool.
4311 * Returns the Pause frame generation and reception capability of the NIC.
/*
 * s2io_ethtool_getpause_data - ethtool .get_pauseparam (kernel-doc above).
 *
 * Reads rmac_pause_cfg and reports pause-frame generation (tx_pause) and
 * reception (rx_pause) capability; pause autonegotiation is never used.
 */
4315 static void s2io_ethtool_getpause_data(struct net_device *dev,
4316 struct ethtool_pauseparam *ep)
4319 nic_t *sp = dev->priv;
4320 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4322 val64 = readq(&bar0->rmac_pause_cfg);
4323 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4324 ep->tx_pause = TRUE;
4325 if (val64 & RMAC_PAUSE_RX_ENABLE)
4326 ep->rx_pause = TRUE;
4327 ep->autoneg = FALSE;
4331 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4332 * @sp : private member of the device structure, which is a pointer to the
4333 * s2io_nic structure.
4334 * @ep : pointer to the structure with pause parameters given by ethtool.
4336 * It can be used to set or reset Pause frame generation or reception
4337 * support of the NIC.
4339 * int, returns 0 on Success
/*
 * s2io_ethtool_setpause_data - ethtool .set_pauseparam (kernel-doc above).
 *
 * Read-modify-writes rmac_pause_cfg: sets or clears the pause-generation
 * and pause-reception enable bits according to ep->tx_pause / ep->rx_pause
 * (the conditional lines are elided in this excerpt).
 */
4342 static int s2io_ethtool_setpause_data(struct net_device *dev,
4343 struct ethtool_pauseparam *ep)
4346 nic_t *sp = dev->priv;
4347 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4349 val64 = readq(&bar0->rmac_pause_cfg);
4351 val64 |= RMAC_PAUSE_GEN_ENABLE;
4353 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4355 val64 |= RMAC_PAUSE_RX_ENABLE;
4357 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4358 writeq(val64, &bar0->rmac_pause_cfg);
4363 * read_eeprom - reads 4 bytes of data from user given offset.
4364 * @sp : private member of the device structure, which is a pointer to the
4365 * s2io_nic structure.
4366 * @off : offset at which the data must be written
4367 * @data : Its an output parameter where the data read at the given
4370 * Will read 4 bytes of data from the user given offset and return the
4372 * NOTE: Will allow to read only part of the EEPROM visible through the
4375 * -1 on failure and 0 on success.
/* I2C device id of the EEPROM on the Xframe board. */
4378 #define S2IO_DEV_ID 5
/*
 * read_eeprom - read 4 bytes from the EEPROM via I2C (kernel-doc above).
 * @off: EEPROM byte offset; @data: out-parameter for the value read.
 *
 * Kicks off an I2C read transaction through i2c_control, then polls up to
 * five times for the CNTL_END completion bit before extracting the data.
 * Returns 0 on success, -1 on timeout (per the kernel-doc above; the tail
 * of the loop is elided in this excerpt).
 */
4379 static int read_eeprom(nic_t * sp, int off, u32 * data)
4384 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* BYTE_CNT(0x3) + READ + CNTL_START: start a 4-byte read at offset 'off'. */
4386 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4387 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4388 I2C_CONTROL_CNTL_START;
4389 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4391 while (exit_cnt < 5) {
4392 val64 = readq(&bar0->i2c_control);
4393 if (I2C_CONTROL_CNTL_END(val64)) {
4394 *data = I2C_CONTROL_GET_DATA(val64);
4406 * write_eeprom - actually writes the relevant part of the data value.
4407 * @sp : private member of the device structure, which is a pointer to the
4408 * s2io_nic structure.
4409 * @off : offset at which the data must be written
4410 * @data : The data that is to be written
4411 * @cnt : Number of bytes of the data that are actually to be written into
4412 * the Eeprom. (max of 3)
4414 * Actually writes the relevant part of the data value into the Eeprom
4415 * through the I2C bus.
4417 * 0 on success, -1 on failure.
/*
 * write_eeprom - write up to 3 bytes to the EEPROM via I2C (kernel-doc above).
 * @off: EEPROM byte offset; @data: value; @cnt: number of bytes to write.
 *
 * Starts an I2C write transaction through i2c_control and polls up to five
 * times for CNTL_END; success additionally requires the device not to have
 * NACK'ed the transfer. Returns 0 on success, -1 on NACK/timeout.
 */
4420 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
4422 int exit_cnt = 0, ret = -1;
4424 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4426 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4427 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
4428 I2C_CONTROL_CNTL_START;
4429 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4431 while (exit_cnt < 5) {
4432 val64 = readq(&bar0->i2c_control);
4433 if (I2C_CONTROL_CNTL_END(val64)) {
/* Transaction ended: only count it a success if the EEPROM ACKed. */
4434 if (!(val64 & I2C_CONTROL_NACK))
4446 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4447 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4448 * @eeprom : pointer to the user level structure provided by ethtool,
4449 * containing all relevant information.
4450 * @data_buf : user defined value to be written into Eeprom.
4451 * Description: Reads the values stored in the Eeprom at given offset
4452 * for a given length. Stores these values int the input argument data
4453 * buffer 'data_buf' and returns these to the caller (ethtool.)
/*
 * s2io_ethtool_geeprom - ethtool .get_eeprom (kernel-doc above).
 *
 * Clamps the request to the visible EEPROM window, then reads it 4 bytes
 * at a time via read_eeprom() into the caller's buffer.
 */
4458 static int s2io_ethtool_geeprom(struct net_device *dev,
4459 struct ethtool_eeprom *eeprom, u8 * data_buf)
4462 nic_t *sp = dev->priv;
/* Magic lets userspace verify the dump came from this vendor/device. */
4464 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp so offset+len never runs past the EEPROM window. */
4466 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4467 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4469 for (i = 0; i < eeprom->len; i += 4) {
4470 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4471 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4475 memcpy((data_buf + i), &valid, 4);
4481 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4482 * @sp : private member of the device structure, which is a pointer to the
4483 * s2io_nic structure.
4484 * @eeprom : pointer to the user level structure provided by ethtool,
4485 * containing all relevant information.
4486 * @data_buf ; user defined value to be written into Eeprom.
4488 * Tries to write the user provided value in the Eeprom, at the offset
4489 * given by the user.
4491 * 0 on success, -EFAULT on failure.
/*
 * s2io_ethtool_seeprom - ethtool .set_eeprom (kernel-doc above).
 *
 * Validates the caller-supplied magic against this vendor/device pair, then
 * writes the buffer into the EEPROM one byte at a time via write_eeprom()
 * (each byte shifted into the high byte position expected by the I2C data
 * register). Loop framing is partially elided in this excerpt.
 */
4494 static int s2io_ethtool_seeprom(struct net_device *dev,
4495 struct ethtool_eeprom *eeprom,
4498 int len = eeprom->len, cnt = 0;
4499 u32 valid = 0, data;
4500 nic_t *sp = dev->priv;
/* Refuse writes unless the magic matches this exact vendor/device. */
4502 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4504 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4505 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
4511 data = (u32) data_buf[cnt] & 0x000000FF;
/* Single byte goes in bits 31:24 of the I2C data word. */
4513 valid = (u32) (data << 24);
4517 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4519 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4521 "write into the specified offset\n");
4532 * s2io_register_test - reads and writes into all clock domains.
4533 * @sp : private member of the device structure, which is a pointer to the
4534 * s2io_nic structure.
4535 * @data : variable that returns the result of each of the test conducted b
4538 * Read and write into all clock domains. The NIC has 3 clock domains,
4539 * see that registers in all the three regions are accessible.
/*
 * s2io_register_test - self-test register access in all clock domains
 * (kernel-doc above).
 *
 * Four read tests compare fixed registers against their known power-on
 * values; two write tests bounce complementary bit patterns (0x5A.. and
 * 0xA5..) through xmsi_data. Failure paths set *data / the fail flag in
 * lines elided from this excerpt.
 */
4544 static int s2io_register_test(nic_t * sp, uint64_t * data)
4546 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read test 1: swapper feedback register has a fixed signature value. */
4550 val64 = readq(&bar0->pif_rd_swapper_fb);
4551 if (val64 != 0x123456789abcdefULL) {
4553 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4556 val64 = readq(&bar0->rmac_pause_cfg);
4557 if (val64 != 0xc000ffff00000000ULL) {
4559 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4562 val64 = readq(&bar0->rx_queue_cfg);
4563 if (val64 != 0x0808080808080808ULL) {
4565 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4568 val64 = readq(&bar0->xgxs_efifo_cfg);
4569 if (val64 != 0x000000001923141EULL) {
4571 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write test 1: pattern must read back unchanged. */
4574 val64 = 0x5A5A5A5A5A5A5A5AULL;
4575 writeq(val64, &bar0->xmsi_data);
4576 val64 = readq(&bar0->xmsi_data);
4577 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4579 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
/* Write test 2: complementary pattern catches stuck-at bits. */
4582 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4583 writeq(val64, &bar0->xmsi_data);
4584 val64 = readq(&bar0->xmsi_data);
4585 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4587 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4595 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4596 * @sp : private member of the device structure, which is a pointer to the
4597 * s2io_nic structure.
4598 * @data:variable that returns the result of each of the test conducted by
4601 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
/*
 * s2io_eeprom_test - self-test EEPROM programmability (kernel-doc above).
 *
 * Two kinds of checks: offsets that MUST be writable (write a pattern,
 * read it back, then restore 0xFFFFFFFF), and protected offsets where a
 * write MUST fail (note the negated `!write_eeprom(...)` conditions — a
 * *successful* write there is the test failure).
 */
4607 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4612 /* Test Write Error at offset 0 */
4613 if (!write_eeprom(sp, 0, 0, 3))
4616 /* Test Write at offset 4f0 */
4617 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4619 if (read_eeprom(sp, 0x4F0, &ret_data))
4622 if (ret_data != 0x01234567)
4625 /* Reset the EEPROM data go FFFF */
4626 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4628 /* Test Write Request Error at offset 0x7c */
4629 if (!write_eeprom(sp, 0x07C, 0, 3))
4632 /* Test Write Request at offset 0x7fc */
4633 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4635 if (read_eeprom(sp, 0x7FC, &ret_data))
4638 if (ret_data != 0x01234567)
4641 /* Reset the EEPROM data go FFFF */
4642 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4644 /* Test Write Error at offset 0x80 */
4645 if (!write_eeprom(sp, 0x080, 0, 3))
4648 /* Test Write Error at offset 0xfc */
4649 if (!write_eeprom(sp, 0x0FC, 0, 3))
4652 /* Test Write Error at offset 0x100 */
4653 if (!write_eeprom(sp, 0x100, 0, 3))
4656 /* Test Write Error at offset 4ec */
4657 if (!write_eeprom(sp, 0x4EC, 0, 3))
4665 * s2io_bist_test - invokes the MemBist test of the card .
4666 * @sp : private member of the device structure, which is a pointer to the
4667 * s2io_nic structure.
4668 * @data:variable that returns the result of each of the test conducted by
4671 * This invokes the MemBist test of the card. We give around
4672 * 2 secs time for the Test to complete. If it's still not complete
4673 * within this period, we consider that the test failed.
4675 * 0 on success and -1 on failure.
4678 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4681 int cnt = 0, ret = -1;
4683 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4684 bist |= PCI_BIST_START;
4685 pci_write_config_word(sp->pdev, PCI_BIST, bist);
4688 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4689 if (!(bist & PCI_BIST_START)) {
4690 *data = (bist & PCI_BIST_CODE_MASK);
4702 * s2io_link_test - verifies the link state of the nic
4703 * @sp ; private member of the device structure, which is a pointer to the
4704 * s2io_nic structure.
4705 * @data: variable that returns the result of each of the test conducted by
4708 * The function verifies the link state of the NIC and updates the input
4709 * argument 'data' appropriately.
/*
 * s2io_link_test - online self-test of the link state (kernel-doc above).
 *
 * Reads adapter_status and flags a failure in *data when the RMAC reports
 * a local fault (result assignment is in lines elided from this excerpt).
 */
4714 static int s2io_link_test(nic_t * sp, uint64_t * data)
4716 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4719 val64 = readq(&bar0->adapter_status);
4720 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4727 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4728 * @sp - private member of the device structure, which is a pointer to the
4729 * s2io_nic structure.
4730 * @data - variable that returns the result of each of the test
4731 * conducted by the driver.
4733 * This is one of the offline test that tests the read and write
4734 * access to the RldRam chip on the NIC.
/*
 * s2io_rldram_test - offline RLDRAM read/write self-test (kernel-doc above).
 *
 * Disables ECC, puts the memory controller into test mode, enables the MRS
 * queue-size mode, then runs two iterations: write three 64-bit patterns
 * (inverted on the second pass), trigger a test WRITE, poll for DONE, then
 * trigger a test GO (read-back) pass, poll for DONE, and finally check the
 * TEST_PASS bit. NOTE(review): loop tails and the final result handling
 * are elided in this excerpt.
 */
4739 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4741 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4743 int cnt, iteration = 0, test_pass = 0;
/* ECC would interfere with raw pattern testing: turn it off first. */
4745 val64 = readq(&bar0->adapter_control);
4746 val64 &= ~ADAPTER_ECC_EN;
4747 writeq(val64, &bar0->adapter_control);
4749 val64 = readq(&bar0->mc_rldram_test_ctrl);
4750 val64 |= MC_RLDRAM_TEST_MODE;
4751 writeq(val64, &bar0->mc_rldram_test_ctrl);
4753 val64 = readq(&bar0->mc_rldram_mrs);
4754 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4755 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4757 val64 |= MC_RLDRAM_MRS_ENABLE;
4758 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes: original patterns, then bit-inverted (upper 48 bits) patterns. */
4760 while (iteration < 2) {
4761 val64 = 0x55555555aaaa0000ULL;
4762 if (iteration == 1) {
4763 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4765 writeq(val64, &bar0->mc_rldram_test_d0);
4767 val64 = 0xaaaa5a5555550000ULL;
4768 if (iteration == 1) {
4769 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4771 writeq(val64, &bar0->mc_rldram_test_d1);
4773 val64 = 0x55aaaaaaaa5a0000ULL;
4774 if (iteration == 1) {
4775 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4777 writeq(val64, &bar0->mc_rldram_test_d2);
4779 val64 = (u64) (0x0000003fffff0000ULL);
4780 writeq(val64, &bar0->mc_rldram_test_add);
/* Write phase: arm test mode, then issue the WRITE strobe. */
4783 val64 = MC_RLDRAM_TEST_MODE;
4784 writeq(val64, &bar0->mc_rldram_test_ctrl);
4787 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4789 writeq(val64, &bar0->mc_rldram_test_ctrl);
4791 for (cnt = 0; cnt < 5; cnt++) {
4792 val64 = readq(&bar0->mc_rldram_test_ctrl);
4793 if (val64 & MC_RLDRAM_TEST_DONE)
/* Read-back phase: re-arm test mode, then issue the GO strobe. */
4801 val64 = MC_RLDRAM_TEST_MODE;
4802 writeq(val64, &bar0->mc_rldram_test_ctrl);
4804 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4805 writeq(val64, &bar0->mc_rldram_test_ctrl);
4807 for (cnt = 0; cnt < 5; cnt++) {
4808 val64 = readq(&bar0->mc_rldram_test_ctrl);
4809 if (val64 & MC_RLDRAM_TEST_DONE)
/* Hardware compares read-back data and reports via the PASS bit. */
4817 val64 = readq(&bar0->mc_rldram_test_ctrl);
4818 if (val64 & MC_RLDRAM_TEST_PASS)
4833 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
4834 * @sp : private member of the device structure, which is a pointer to the
4835 * s2io_nic structure.
4836 * @ethtest : pointer to a ethtool command specific structure that will be
4837 * returned to the user.
4838 * @data : variable that returns the result of each of the test
4839 * conducted by the driver.
4841 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4842 * the health of the card.
/*
 * s2io_ethtool_test - ethtool .self_test dispatcher (kernel-doc above).
 *
 * Offline mode: takes the interface down (if it was up), then runs the
 * register, RLDRAM, EEPROM and BIST tests, recording each result in data[]
 * and OR-ing ETH_TEST_FL_FAILED into ethtest->flags on any failure.
 * Online mode runs the link test; the interface-restoration path is in
 * lines elided from this excerpt.
 */
4847 static void s2io_ethtool_test(struct net_device *dev,
4848 struct ethtool_test *ethtest,
4851 nic_t *sp = dev->priv;
4852 int orig_state = netif_running(sp->dev);
4854 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4855 /* Offline Tests. */
/* Offline tests are destructive: the interface must be closed first. */
4857 s2io_close(sp->dev);
4859 if (s2io_register_test(sp, &data[0]))
4860 ethtest->flags |= ETH_TEST_FL_FAILED;
4864 if (s2io_rldram_test(sp, &data[3]))
4865 ethtest->flags |= ETH_TEST_FL_FAILED;
4869 if (s2io_eeprom_test(sp, &data[1]))
4870 ethtest->flags |= ETH_TEST_FL_FAILED;
4872 if (s2io_bist_test(sp, &data[4]))
4873 ethtest->flags |= ETH_TEST_FL_FAILED;
4883 "%s: is not up, cannot run test\n",
/* Online: the link test is the only check that is safe while running. */
4892 if (s2io_link_test(sp, &data[2]))
4893 ethtest->flags |= ETH_TEST_FL_FAILED;
/*
 * Ethtool .get_ethtool_stats handler: snapshots the adapter's hardware
 * MAC statistics block into the u64 array handed to ethtool.
 *
 * Many hardware counters are split into a 32-bit little-endian value plus
 * a 32-bit overflow word; those pairs are recombined below as
 * (oflow << 32) | low.  True 64-bit counters are converted with
 * le64_to_cpu().  The order of the tmp_stats[i++] stores must match
 * ethtool_stats_keys (defined elsewhere in this file).
 * NOTE(review): listing has gaps — some lines are missing from the
 * extraction.
 */
4902 static void s2io_get_ethtool_stats(struct net_device *dev,
4903 struct ethtool_stats *estats,
4907 nic_t *sp = dev->priv;
4908 StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Force a fresh DMA of the statistics block before copying it out. */
4910 s2io_updt_stats(sp);
/* --- Transmit MAC (tmac) counters --- */
4912 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4913 le32_to_cpu(stat_info->tmac_frms);
4915 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4916 le32_to_cpu(stat_info->tmac_data_octets);
4917 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4919 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4920 le32_to_cpu(stat_info->tmac_mcst_frms);
4922 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4923 le32_to_cpu(stat_info->tmac_bcst_frms);
4924 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4926 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4927 le32_to_cpu(stat_info->tmac_any_err_frms);
4928 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4930 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4931 le32_to_cpu(stat_info->tmac_vld_ip);
4933 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4934 le32_to_cpu(stat_info->tmac_drop_ip);
4936 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4937 le32_to_cpu(stat_info->tmac_icmp);
4939 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4940 le32_to_cpu(stat_info->tmac_rst_tcp);
4941 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4942 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4943 le32_to_cpu(stat_info->tmac_udp);
/* --- Receive MAC (rmac) counters --- */
4945 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4946 le32_to_cpu(stat_info->rmac_vld_frms);
4948 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4949 le32_to_cpu(stat_info->rmac_data_octets);
4950 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4951 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4953 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4954 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4956 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4957 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4958 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4959 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4960 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4962 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4963 le32_to_cpu(stat_info->rmac_discarded_frms);
4965 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4966 le32_to_cpu(stat_info->rmac_usized_frms);
4968 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4969 le32_to_cpu(stat_info->rmac_osized_frms);
4971 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4972 le32_to_cpu(stat_info->rmac_frag_frms);
4974 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4975 le32_to_cpu(stat_info->rmac_jabber_frms);
4976 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4977 le32_to_cpu(stat_info->rmac_ip);
4978 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4979 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4980 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4981 le32_to_cpu(stat_info->rmac_drop_ip);
4982 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4983 le32_to_cpu(stat_info->rmac_icmp);
4984 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4985 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4986 le32_to_cpu(stat_info->rmac_udp);
4988 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4989 le32_to_cpu(stat_info->rmac_err_drp_udp);
4991 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4992 le32_to_cpu(stat_info->rmac_pause_cnt);
4994 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4995 le32_to_cpu(stat_info->rmac_accepted_ip);
4996 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* --- Driver-maintained software counters (ECC error tallies) --- */
4998 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4999 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
/* Ethtool .get_regs_len: size in bytes of the register dump (whole
 * Xena register space). */
5002 int s2io_ethtool_get_regs_len(struct net_device *dev)
5004 return (XENA_REG_SPACE);
/* Ethtool .get_rx_csum: report whether Rx checksum offload is enabled
 * (driver-private flag sp->rx_csum). */
5008 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5010 nic_t *sp = dev->priv;
5012 return (sp->rx_csum);
/* Ethtool .set_rx_csum: enable/disable Rx checksum offload.
 * NOTE(review): the body lines that store `data` are missing from this
 * listing — presumably sp->rx_csum is assigned here; confirm upstream. */
5014 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5016 nic_t *sp = dev->priv;
/* Ethtool .get_eeprom_len: size in bytes of the on-board EEPROM. */
5025 int s2io_get_eeprom_len(struct net_device *dev)
5027 return (XENA_EEPROM_SPACE);
/* Ethtool .self_test_count: number of self-test result slots. */
5030 int s2io_ethtool_self_test_count(struct net_device *dev)
5032 return (S2IO_TEST_LEN);
/*
 * Ethtool .get_strings: copy out the name table matching `stringset`
 * (self-test names from s2io_gstrings, or statistics key names from
 * ethtool_stats_keys).  The case labels are missing from this listing.
 */
5034 void s2io_ethtool_get_strings(struct net_device *dev,
5035 u32 stringset, u8 * data)
5037 switch (stringset) {
5039 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5042 memcpy(data, &ethtool_stats_keys,
5043 sizeof(ethtool_stats_keys));
/* Ethtool .get_stats_count: number of statistics entries exported. */
5046 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5048 return (S2IO_STAT_LEN);
/* Ethtool .set_tx_csum: toggle NETIF_F_IP_CSUM in dev->features
 * according to `data` (non-zero enables Tx IP checksum offload). */
5051 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5054 dev->features |= NETIF_F_IP_CSUM;
5056 dev->features &= ~NETIF_F_IP_CSUM;
/*
 * Ethtool operations table wired into the net_device via SET_ETHTOOL_OPS
 * in s2io_init_nic().  Generic ethtool_op_* helpers are used where the
 * stock behavior suffices; everything else is driver-specific.
 */
5062 static struct ethtool_ops netdev_ethtool_ops = {
5063 .get_settings = s2io_ethtool_gset,
5064 .set_settings = s2io_ethtool_sset,
5065 .get_drvinfo = s2io_ethtool_gdrvinfo,
5066 .get_regs_len = s2io_ethtool_get_regs_len,
5067 .get_regs = s2io_ethtool_gregs,
5068 .get_link = ethtool_op_get_link,
5069 .get_eeprom_len = s2io_get_eeprom_len,
5070 .get_eeprom = s2io_ethtool_geeprom,
5071 .set_eeprom = s2io_ethtool_seeprom,
5072 .get_pauseparam = s2io_ethtool_getpause_data,
5073 .set_pauseparam = s2io_ethtool_setpause_data,
5074 .get_rx_csum = s2io_ethtool_get_rx_csum,
5075 .set_rx_csum = s2io_ethtool_set_rx_csum,
5076 .get_tx_csum = ethtool_op_get_tx_csum,
5077 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5078 .get_sg = ethtool_op_get_sg,
5079 .set_sg = ethtool_op_set_sg,
5081 .get_tso = ethtool_op_get_tso,
5082 .set_tso = ethtool_op_set_tso,
5084 .self_test_count = s2io_ethtool_self_test_count,
5085 .self_test = s2io_ethtool_test,
5086 .get_strings = s2io_ethtool_get_strings,
5087 .phys_id = s2io_ethtool_idnic,
5088 .get_stats_count = s2io_ethtool_get_stats_count,
5089 .get_ethtool_stats = s2io_get_ethtool_stats
5093 * s2io_ioctl - Entry point for the Ioctl
5094 * @dev : Device pointer.
5095 * @ifr : An IOCTL specefic structure, that can contain a pointer to
5096 * a proprietary structure used to pass information to the driver.
5097 * @cmd : This is used to distinguish between the different commands that
5098 * can be passed to the IOCTL functions.
5100 * Currently there are no special functionality supported in IOCTL, hence
5101 * the function always returns EOPNOTSUPPORTED
/* IOCTL entry point (dev->do_ioctl).  Per the kernel-doc above, no
 * commands are supported; the body (missing from this listing)
 * presumably just returns the "not supported" error code. */
5104 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5110 * s2io_change_mtu - entry point to change MTU size for the device.
5111 * @dev : device pointer.
5112 * @new_mtu : the new MTU size for the device.
5113 * Description: A driver entry point to change MTU size for the device.
5114 * Before changing the MTU the device must be stopped.
5116 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * dev->change_mtu handler.  Rejects sizes outside [MIN_MTU,
 * S2IO_JUMBO_SIZE].  If the interface is running, the card is restarted
 * (s2io_card_up) so the new MTU takes effect; if it is down, the MTU is
 * programmed directly into the RMAC max-payload-length register.
 * NOTE(review): several lines (dev->mtu assignment, card-down call,
 * returns) are missing from this listing.
 */
5120 int s2io_change_mtu(struct net_device *dev, int new_mtu)
5122 nic_t *sp = dev->priv;
5124 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5125 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5131 if (netif_running(dev)) {
5133 netif_stop_queue(dev);
5134 if (s2io_card_up(sp)) {
5135 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5138 if (netif_queue_stopped(dev))
5139 netif_wake_queue(dev);
5140 } else { /* Device is down */
5141 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5142 u64 val64 = new_mtu;
/* vBIT(...) positions the MTU value in the rmac_max_pyld_len field. */
5144 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5151 * s2io_tasklet - Bottom half of the ISR.
5152 * @dev_adr : address of the device structure in dma_addr_t format.
5154 * This is the tasklet or the bottom half of the ISR. This is
5155 * an extension of the ISR which is scheduled by the scheduler to be run
5156 * when the load on the CPU is low. All low priority tasks of the ISR can
5157 * be pushed into the tasklet. For now the tasklet is used only to
5158 * replenish the Rx buffers in the Rx buffer descriptors.
/*
 * Rx-replenish tasklet (bottom half).  Guarded by TASKLET_IN_USE so only
 * one instance runs; refills the Rx descriptors of every configured ring
 * via fill_rx_buffers(), logging -ENOMEM and treating -EFILL (ring
 * already full) as a reason to stop.  Clears tasklet_status bit 0 on
 * exit.
 */
5163 static void s2io_tasklet(unsigned long dev_addr)
5165 struct net_device *dev = (struct net_device *) dev_addr;
5166 nic_t *sp = dev->priv;
5168 mac_info_t *mac_control;
5169 struct config_param *config;
5171 mac_control = &sp->mac_control;
5172 config = &sp->config;
5174 if (!TASKLET_IN_USE) {
5175 for (i = 0; i < config->rx_ring_num; i++) {
5176 ret = fill_rx_buffers(sp, i);
5177 if (ret == -ENOMEM) {
5178 DBG_PRINT(ERR_DBG, "%s: Out of ",
5180 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5182 } else if (ret == -EFILL) {
5184 "%s: Rx Ring %d is full\n",
5189 clear_bit(0, (&sp->tasklet_status));
5194 * s2io_set_link - Set the LInk status
5195 * @data: long pointer to device private structue
5196 * Description: Sets the link status for the adapter
/*
 * Deferred link-state worker (scheduled via sp->set_link_task).  Uses
 * link_state bit 0 as a "reset in progress" guard: if already set, the
 * card is being reset and nothing is done.  Otherwise, once the adapter
 * is quiescent, a link-up is handled by enabling the adapter (and, on
 * cards with faulty link LEDs, driving GPIO 0 / the adapter LED) and a
 * link-down by clearing the same, with s2io_link() notifying the stack.
 * NOTE(review): listing has gaps; closing braces and some statements are
 * missing from the extraction.
 */
5199 static void s2io_set_link(unsigned long data)
5201 nic_t *nic = (nic_t *) data;
5202 struct net_device *dev = nic->dev;
5203 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5207 if (test_and_set_bit(0, &(nic->link_state))) {
5208 /* The card is being reset, no point doing anything */
5212 subid = nic->pdev->subsystem_device;
5213 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5215 * Allow a small delay for the NICs self initiated
5216 * cleanup to complete.
5221 val64 = readq(&bar0->adapter_status);
5222 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
5223 if (LINK_IS_UP(val64)) {
/* Link up: enable the adapter... */
5224 val64 = readq(&bar0->adapter_control);
5225 val64 |= ADAPTER_CNTL_EN;
5226 writeq(val64, &bar0->adapter_control);
/* ...and work around boards whose link LED needs GPIO 0 driven. */
5227 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5229 val64 = readq(&bar0->gpio_control);
5230 val64 |= GPIO_CTRL_GPIO_0;
5231 writeq(val64, &bar0->gpio_control);
5232 val64 = readq(&bar0->gpio_control);
5234 val64 |= ADAPTER_LED_ON;
5235 writeq(val64, &bar0->adapter_control);
5237 if (s2io_link_fault_indication(nic) ==
5238 MAC_RMAC_ERR_TIMER) {
/* Re-check: the link may have dropped while we enabled the device. */
5239 val64 = readq(&bar0->adapter_status);
5240 if (!LINK_IS_UP(val64)) {
5241 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5242 DBG_PRINT(ERR_DBG, " Link down");
5243 DBG_PRINT(ERR_DBG, "after ");
5244 DBG_PRINT(ERR_DBG, "enabling ");
5245 DBG_PRINT(ERR_DBG, "device \n");
5248 if (nic->device_enabled_once == FALSE) {
5249 nic->device_enabled_once = TRUE;
5251 s2io_link(nic, LINK_UP);
/* Link down: release GPIO 0 on the faulty-LED boards and notify. */
5253 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5255 val64 = readq(&bar0->gpio_control);
5256 val64 &= ~GPIO_CTRL_GPIO_0;
5257 writeq(val64, &bar0->gpio_control);
5258 val64 = readq(&bar0->gpio_control);
5260 s2io_link(nic, LINK_DOWN);
5262 } else { /* NIC is not Quiescent. */
5263 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5264 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5265 netif_stop_queue(dev);
5267 clear_bit(0, &(nic->link_state));
/*
 * Bring the adapter down: stop the alarm timer, wait out a concurrent
 * s2io_set_link (link_state bit 0 doubles as that mutex), mark the card
 * state CARD_DOWN, stop traffic, kill the Rx-replenish tasklet, verify
 * quiescence (presumably followed by a reset — lines missing), wait for
 * in-flight ISRs to drain, then free all Tx and Rx buffers under their
 * respective locks.
 */
5270 static void s2io_card_down(nic_t * sp)
5273 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5274 unsigned long flags;
5275 register u64 val64 = 0;
5277 del_timer_sync(&sp->alarm_timer);
5278 /* If s2io_set_link task is executing, wait till it completes. */
5279 while (test_and_set_bit(0, &(sp->link_state))) {
5282 atomic_set(&sp->card_state, CARD_DOWN);
5284 /* disable Tx and Rx traffic on the NIC */
5288 tasklet_kill(&sp->task);
5290 /* Check if the device is Quiescent and then Reset the NIC */
5292 val64 = readq(&bar0->adapter_status);
5293 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
5301 "s2io_close:Device not Quiescent ");
5302 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
5303 (unsigned long long) val64);
5309 /* Waiting till all Interrupt handlers are complete */
5313 if (!atomic_read(&sp->isr_cnt))
5318 spin_lock_irqsave(&sp->tx_lock, flags);
5319 /* Free all Tx buffers */
5320 free_tx_buffers(sp);
5321 spin_unlock_irqrestore(&sp->tx_lock, flags);
5323 /* Free all Rx buffers */
5324 spin_lock_irqsave(&sp->rx_lock, flags);
5325 free_rx_buffers(sp);
5326 spin_unlock_irqrestore(&sp->rx_lock, flags);
/* Release the link_state "mutex" taken at entry. */
5328 clear_bit(0, &(sp->link_state));
/*
 * Bring the adapter up: initialize the hardware (init_nic), enable
 * MSI/MSI-X if configured (falling back to INTA on failure), fill the
 * Rx rings, program the multicast/receive mode, arm the Rx-replenish
 * tasklet, start the NIC, and arm the 0.5 s alarm timer.  Marks the
 * card CARD_UP on success.  Error paths unwind buffers/IRQ/tasklet.
 * NOTE(review): listing has gaps (returns and some cleanup lines are
 * missing from the extraction).
 */
5331 static int s2io_card_up(nic_t * sp)
5334 mac_info_t *mac_control;
5335 struct config_param *config;
5336 struct net_device *dev = (struct net_device *) sp->dev;
5338 /* Initialize the H/W I/O registers */
5339 if (init_nic(sp) != 0) {
5340 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
5345 if (sp->intr_type == MSI)
5346 ret = s2io_enable_msi(sp);
5347 else if (sp->intr_type == MSI_X)
5348 ret = s2io_enable_msi_x(sp);
/* MSI/MSI-X setup failure is non-fatal: fall back to legacy INTA. */
5350 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
5351 sp->intr_type = INTA;
5355 * Initializing the Rx buffers. For now we are considering only 1
5356 * Rx ring and initializing buffers into 30 Rx blocks
5358 mac_control = &sp->mac_control;
5359 config = &sp->config;
5361 for (i = 0; i < config->rx_ring_num; i++) {
5362 if ((ret = fill_rx_buffers(sp, i))) {
5363 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
5366 free_rx_buffers(sp);
5369 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
5370 atomic_read(&sp->rx_bufs_left[i]));
5373 /* Setting its receive mode */
5374 s2io_set_multicast(dev);
5376 /* Enable tasklet for the device */
5377 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
5379 /* Enable Rx Traffic and interrupts on the NIC */
5380 if (start_nic(sp)) {
5381 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
5382 tasklet_kill(&sp->task);
5384 free_irq(dev->irq, dev);
5385 free_rx_buffers(sp);
5389 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
5391 atomic_set(&sp->card_state, CARD_UP);
5396 * s2io_restart_nic - Resets the NIC.
5397 * @data : long pointer to the device private structure
5399 * This function is scheduled to be run by the s2io_tx_watchdog
5400 * function after 0.5 secs to reset the NIC. The idea is to reduce
5401 * the run time of the watch dog routine which is run holding a
/*
 * Work-queue handler (sp->rst_timer_task) scheduled by the Tx watchdog:
 * brings the card back up (the card-down call is missing from this
 * listing) and re-wakes the Tx queue.
 */
5405 static void s2io_restart_nic(unsigned long data)
5407 struct net_device *dev = (struct net_device *) data;
5408 nic_t *sp = dev->priv;
5411 if (s2io_card_up(sp)) {
5412 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5415 netif_wake_queue(dev);
5416 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
5422 * s2io_tx_watchdog - Watchdog for transmit side.
5423 * @dev : Pointer to net device structure
5425 * This function is triggered if the Tx Queue is stopped
5426 * for a pre-defined amount of time when the Interface is still up.
5427 * If the Interface is jammed in such a situation, the hardware is
5428 * reset (by s2io_close) and restarted again (by s2io_open) to
5429 * overcome any problem that might have been caused in the hardware.
/*
 * dev->tx_timeout handler: if the link is still up when the Tx queue
 * stalls, defer the heavy reset work to s2io_restart_nic via the
 * rst_timer_task work item (keeps the watchdog itself short).
 */
5434 static void s2io_tx_watchdog(struct net_device *dev)
5436 nic_t *sp = dev->priv;
5438 if (netif_carrier_ok(dev)) {
5439 schedule_work(&sp->rst_timer_task);
5444 * rx_osm_handler - To perform some OS related operations on SKB.
5445 * @sp: private member of the device structure,pointer to s2io_nic structure.
5446 * @skb : the socket buffer pointer.
5447 * @len : length of the packet
5448 * @cksum : FCS checksum of the frame.
5449 * @ring_no : the ring from which this RxD was extracted.
5451 * This function is called by the Tx interrupt service routine to perform
5452 * some OS related operations on the SKB before passing it to the upper
5453 * layers. It mainly checks if the checksum is OK, if so adds it to the
5454 * SKBs cksum variable, increments the Rx packet count and passes the SKB
5455 * to the upper layer. If the checksum is wrong, it increments the Rx
5456 * packet error count, frees the SKB and returns error.
5458 * SUCCESS on success and -1 on failure.
/*
 * Per-RxD completion handler: recovers the skb from Host_Control,
 * accounts errors/statistics, (in 2-buffer mode) re-assembles the frame
 * from buffer 0 + buffer 2, sets ip_summed from the hardware L3/L4
 * checksum flags, and hands the skb to the stack — via NAPI or the
 * classic path, with VLAN acceleration when a tag is present.
 * NOTE(review): listing has gaps; return statements and some #else/#endif
 * lines are missing from the extraction.
 */
5460 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5462 nic_t *sp = ring_data->nic;
5463 struct net_device *dev = (struct net_device *) sp->dev;
5464 struct sk_buff *skb = (struct sk_buff *)
5465 ((unsigned long) rxdp->Host_Control);
5466 int ring_no = ring_data->ring_no;
5467 u16 l3_csum, l4_csum;
5468 #ifdef CONFIG_2BUFF_MODE
5469 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5470 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5471 int get_block = ring_data->rx_curr_get_info.block_index;
5472 int get_off = ring_data->rx_curr_get_info.offset;
5473 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5474 unsigned char *buff;
5476 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
/* Hardware flagged an error in this descriptor (T_CODE non-zero). */
5479 if (rxdp->Control_1 & RXD_T_CODE) {
5480 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5481 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5484 sp->stats.rx_crc_errors++;
5485 atomic_dec(&sp->rx_bufs_left[ring_no]);
5486 rxdp->Host_Control = 0;
5490 /* Updating statistics */
5491 rxdp->Host_Control = 0;
5493 sp->stats.rx_packets++;
5494 #ifndef CONFIG_2BUFF_MODE
5495 sp->stats.rx_bytes += len;
5497 sp->stats.rx_bytes += buf0_len + buf2_len;
5500 #ifndef CONFIG_2BUFF_MODE
/* 2-buffer mode: prepend buffer-0 (header) bytes, then extend over
 * buffer-2 (payload) which is already in the skb data area. */
5503 buff = skb_push(skb, buf0_len);
5504 memcpy(buff, ba->ba_0, buf0_len);
5505 skb_put(skb, buf2_len);
5508 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5510 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5511 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5512 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5514 * NIC verifies if the Checksum of the received
5515 * frame is Ok or not and accordingly returns
5516 * a flag in the RxD.
5518 skb->ip_summed = CHECKSUM_UNNECESSARY;
5521 * Packet with erroneous checksum, let the
5522 * upper layers deal with it.
5524 skb->ip_summed = CHECKSUM_NONE;
5527 skb->ip_summed = CHECKSUM_NONE;
5530 skb->protocol = eth_type_trans(skb, dev);
5531 #ifdef CONFIG_S2IO_NAPI
5532 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5533 /* Queueing the vlan frame to the upper layer */
5534 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5535 RXD_GET_VLAN_TAG(rxdp->Control_2));
5537 netif_receive_skb(skb);
5540 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5541 /* Queueing the vlan frame to the upper layer */
5542 vlan_hwaccel_rx(skb, sp->vlgrp,
5543 RXD_GET_VLAN_TAG(rxdp->Control_2));
5548 dev->last_rx = jiffies;
5549 atomic_dec(&sp->rx_bufs_left[ring_no]);
5554 * s2io_link - stops/starts the Tx queue.
5555 * @sp : private member of the device structure, which is a pointer to the
5556 * s2io_nic structure.
5557 * @link : indicates whether link is UP/DOWN.
5559 * This function stops/starts the Tx queue depending on whether the link
5560 * status of the NIC is down or up. This is called by the Alarm
5561 * interrupt handler whenever a link change interrupt comes up.
/*
 * Propagate a link change to the stack: on a transition (compared with
 * last_link_state) set the carrier off/on and log it, then record the
 * new state.
 */
5566 void s2io_link(nic_t * sp, int link)
5568 struct net_device *dev = (struct net_device *) sp->dev;
5570 if (link != sp->last_link_state) {
5571 if (link == LINK_DOWN) {
5572 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5573 netif_carrier_off(dev);
5575 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5576 netif_carrier_on(dev);
5579 sp->last_link_state = link;
5583 * get_xena_rev_id - to identify revision ID of xena.
5584 * @pdev : PCI Dev structure
5586 * Function to identify the Revision ID of xena.
5588 * returns the revision ID of the device.
/* Read the PCI revision ID of the adapter from config space.
 * NOTE(review): the declaration of `id` and the return statement are
 * missing from this listing — presumably `id` is returned. */
5591 int get_xena_rev_id(struct pci_dev *pdev)
5595 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5600 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5601 * @sp : private member of the device structure, which is a pointer to the
5602 * s2io_nic structure.
5604 * This function initializes a few of the PCI and PCI-X configuration registers
5605 * with recommended values.
/*
 * Program recommended PCI/PCI-X config-space settings: enable data
 * parity error recovery in the PCI-X command register, set PERR
 * response in the PCI command register, and forcibly disable relaxed
 * ordering.  Each write is followed by a read-back of the register.
 */
5610 static void s2io_init_pci(nic_t * sp)
5612 u16 pci_cmd = 0, pcix_cmd = 0;
5614 /* Enable Data Parity Error Recovery in PCI-X command register. */
5615 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5617 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5619 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5622 /* Set the PErr Response bit in PCI command register. */
5623 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5624 pci_write_config_word(sp->pdev, PCI_COMMAND,
5625 (pci_cmd | PCI_COMMAND_PARITY));
5626 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5628 /* Forcibly disabling relaxed ordering capability of the card. */
5630 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5632 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/*
 * Module metadata and loadable parameters.  The array parameters
 * (tx_fifo_len, rx_ring_sz, rts_frm_len) are per-FIFO/per-ring values;
 * indicate_max_pkts is only meaningful without NAPI (see #ifndef).
 * See the file header for descriptions of the main parameters.
 */
5636 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5637 MODULE_LICENSE("GPL");
5638 module_param(tx_fifo_num, int, 0);
5639 module_param(rx_ring_num, int, 0);
5640 module_param_array(tx_fifo_len, uint, NULL, 0);
5641 module_param_array(rx_ring_sz, uint, NULL, 0);
5642 module_param_array(rts_frm_len, uint, NULL, 0);
5643 module_param(use_continuous_tx_intrs, int, 1);
5644 module_param(rmac_pause_time, int, 0);
5645 module_param(mc_pause_threshold_q0q3, int, 0);
5646 module_param(mc_pause_threshold_q4q7, int, 0);
5647 module_param(shared_splits, int, 0);
5648 module_param(tmac_util_period, int, 0);
5649 module_param(rmac_util_period, int, 0);
5650 module_param(bimodal, bool, 0);
5651 #ifndef CONFIG_S2IO_NAPI
5652 module_param(indicate_max_pkts, int, 0);
5654 module_param(rxsync_frequency, int, 0);
5655 module_param(intr_type, int, 0);
5658 * s2io_init_nic - Initialization of the adapter .
5659 * @pdev : structure containing the PCI related information of the device.
5660 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5662 * The function initializes an adapter identified by the pci_dev structure.
5663 * All OS related initialization including memory and device structure and
5664 * initialization of the device private variable is done. Also the swapper
5665 * control register is initialized to enable read and write into the I/O
5666 * registers of the device.
5668 * returns 0 on success and negative on failure.
5671 static int __devinit
5672 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5675 struct net_device *dev;
5677 int dma_flag = FALSE;
5678 u32 mac_up, mac_down;
5679 u64 val64 = 0, tmp64 = 0;
5680 XENA_dev_config_t __iomem *bar0 = NULL;
5682 mac_info_t *mac_control;
5683 struct config_param *config;
5685 u8 dev_intr_type = intr_type;
5687 #ifdef CONFIG_S2IO_NAPI
5688 if (dev_intr_type != INTA) {
5689 DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
5690 is enabled. Defaulting to INTA\n");
5691 dev_intr_type = INTA;
5694 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5697 if ((ret = pci_enable_device(pdev))) {
5699 "s2io_init_nic: pci_enable_device failed\n");
5703 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5704 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5706 if (pci_set_consistent_dma_mask
5707 (pdev, DMA_64BIT_MASK)) {
5709 "Unable to obtain 64bit DMA for \
5710 consistent allocations\n");
5711 pci_disable_device(pdev);
5714 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5715 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5717 pci_disable_device(pdev);
5721 if ((dev_intr_type == MSI_X) &&
5722 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
5723 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
5724 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
5725 Defaulting to INTA\n");
5726 dev_intr_type = INTA;
5728 if (dev_intr_type != MSI_X) {
5729 if (pci_request_regions(pdev, s2io_driver_name)) {
5730 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5731 pci_disable_device(pdev);
5736 if (!(request_mem_region(pci_resource_start(pdev, 0),
5737 pci_resource_len(pdev, 0), s2io_driver_name))) {
5738 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
5739 pci_disable_device(pdev);
5742 if (!(request_mem_region(pci_resource_start(pdev, 2),
5743 pci_resource_len(pdev, 2), s2io_driver_name))) {
5744 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
5745 release_mem_region(pci_resource_start(pdev, 0),
5746 pci_resource_len(pdev, 0));
5747 pci_disable_device(pdev);
5752 dev = alloc_etherdev(sizeof(nic_t));
5754 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5755 pci_disable_device(pdev);
5756 pci_release_regions(pdev);
5760 pci_set_master(pdev);
5761 pci_set_drvdata(pdev, dev);
5762 SET_MODULE_OWNER(dev);
5763 SET_NETDEV_DEV(dev, &pdev->dev);
5765 /* Private member variable initialized to s2io NIC structure */
5767 memset(sp, 0, sizeof(nic_t));
5770 sp->high_dma_flag = dma_flag;
5771 sp->device_enabled_once = FALSE;
5772 sp->intr_type = dev_intr_type;
5774 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5775 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5776 sp->device_type = XFRAME_II_DEVICE;
5778 sp->device_type = XFRAME_I_DEVICE;
5781 /* Initialize some PCI/PCI-X fields of the NIC. */
5785 * Setting the device configuration parameters.
5786 * Most of these parameters can be specified by the user during
5787 * module insertion as they are module loadable parameters. If
5788 * these parameters are not not specified during load time, they
5789 * are initialized with default values.
5791 mac_control = &sp->mac_control;
5792 config = &sp->config;
5794 /* Tx side parameters. */
5795 if (tx_fifo_len[0] == 0)
5796 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5797 config->tx_fifo_num = tx_fifo_num;
5798 for (i = 0; i < MAX_TX_FIFOS; i++) {
5799 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5800 config->tx_cfg[i].fifo_priority = i;
5803 /* mapping the QoS priority to the configured fifos */
5804 for (i = 0; i < MAX_TX_FIFOS; i++)
5805 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5807 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5808 for (i = 0; i < config->tx_fifo_num; i++) {
5809 config->tx_cfg[i].f_no_snoop =
5810 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5811 if (config->tx_cfg[i].fifo_len < 65) {
5812 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5816 config->max_txds = MAX_SKB_FRAGS + 1;
5818 /* Rx side parameters. */
5819 if (rx_ring_sz[0] == 0)
5820 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5821 config->rx_ring_num = rx_ring_num;
5822 for (i = 0; i < MAX_RX_RINGS; i++) {
5823 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5824 (MAX_RXDS_PER_BLOCK + 1);
5825 config->rx_cfg[i].ring_priority = i;
5828 for (i = 0; i < rx_ring_num; i++) {
5829 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5830 config->rx_cfg[i].f_no_snoop =
5831 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5834 /* Setting Mac Control parameters */
5835 mac_control->rmac_pause_time = rmac_pause_time;
5836 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5837 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5840 /* Initialize Ring buffer parameters. */
5841 for (i = 0; i < config->rx_ring_num; i++)
5842 atomic_set(&sp->rx_bufs_left[i], 0);
5844 /* Initialize the number of ISRs currently running */
5845 atomic_set(&sp->isr_cnt, 0);
5847 /* initialize the shared memory used by the NIC and the host */
5848 if (init_shared_mem(sp)) {
5849 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5852 goto mem_alloc_failed;
5855 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5856 pci_resource_len(pdev, 0));
5858 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5861 goto bar0_remap_failed;
5864 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5865 pci_resource_len(pdev, 2));
5867 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5870 goto bar1_remap_failed;
5873 dev->irq = pdev->irq;
5874 dev->base_addr = (unsigned long) sp->bar0;
5876 /* Initializing the BAR1 address as the start of the FIFO pointer. */
5877 for (j = 0; j < MAX_TX_FIFOS; j++) {
5878 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5879 (sp->bar1 + (j * 0x00020000));
5882 /* Driver entry points */
5883 dev->open = &s2io_open;
5884 dev->stop = &s2io_close;
5885 dev->hard_start_xmit = &s2io_xmit;
5886 dev->get_stats = &s2io_get_stats;
5887 dev->set_multicast_list = &s2io_set_multicast;
5888 dev->do_ioctl = &s2io_ioctl;
5889 dev->change_mtu = &s2io_change_mtu;
5890 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5891 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5892 dev->vlan_rx_register = s2io_vlan_rx_register;
5893 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5896 * will use eth_mac_addr() for dev->set_mac_address
5897 * mac address will be set every time dev->open() is called
5899 #if defined(CONFIG_S2IO_NAPI)
5900 dev->poll = s2io_poll;
5904 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5905 if (sp->high_dma_flag == TRUE)
5906 dev->features |= NETIF_F_HIGHDMA;
5908 dev->features |= NETIF_F_TSO;
5911 dev->tx_timeout = &s2io_tx_watchdog;
5912 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5913 INIT_WORK(&sp->rst_timer_task,
5914 (void (*)(void *)) s2io_restart_nic, dev);
5915 INIT_WORK(&sp->set_link_task,
5916 (void (*)(void *)) s2io_set_link, sp);
5918 pci_save_state(sp->pdev);
5920 /* Setting swapper control on the NIC, for proper reset operation */
5921 if (s2io_set_swapper(sp)) {
5922 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5925 goto set_swap_failed;
5928 /* Verify if the Herc works on the slot its placed into */
5929 if (sp->device_type & XFRAME_II_DEVICE) {
5930 mode = s2io_verify_pci_mode(sp);
5932 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5933 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5935 goto set_swap_failed;
5939 /* Not needed for Herc */
5940 if (sp->device_type & XFRAME_I_DEVICE) {
5942 * Fix for all "FFs" MAC address problems observed on
5945 fix_mac_address(sp);
5950 * MAC address initialization.
5951 * For now only one mac address will be read and used.
5954 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5955 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5956 writeq(val64, &bar0->rmac_addr_cmd_mem);
5957 wait_for_cmd_complete(sp);
5959 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5960 mac_down = (u32) tmp64;
5961 mac_up = (u32) (tmp64 >> 32);
5963 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
5965 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5966 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5967 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5968 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5969 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5970 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5972 /* Set the factory defined MAC address initially */
5973 dev->addr_len = ETH_ALEN;
5974 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5977 * Initialize the tasklet status and link state flags
5978 * and the card state parameter
5980 atomic_set(&(sp->card_state), 0);
5981 sp->tasklet_status = 0;
5984 /* Initialize spinlocks */
5985 spin_lock_init(&sp->tx_lock);
5986 #ifndef CONFIG_S2IO_NAPI
5987 spin_lock_init(&sp->put_lock);
5989 spin_lock_init(&sp->rx_lock);
5992 * SXE-002: Configure link and activity LED to init state
5995 subid = sp->pdev->subsystem_device;
5996 if ((subid & 0xFF) >= 0x07) {
5997 val64 = readq(&bar0->gpio_control);
5998 val64 |= 0x0000800000000000ULL;
5999 writeq(val64, &bar0->gpio_control);
6000 val64 = 0x0411040400000000ULL;
6001 writeq(val64, (void __iomem *) bar0 + 0x2700);
6002 val64 = readq(&bar0->gpio_control);
6005 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
6007 if (register_netdev(dev)) {
6008 DBG_PRINT(ERR_DBG, "Device registration failed\n");
6010 goto register_failed;
6013 if (sp->device_type & XFRAME_II_DEVICE) {
6014 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
6016 DBG_PRINT(ERR_DBG, "(rev %d), %s",
6017 get_xena_rev_id(sp->pdev),
6018 s2io_driver_version);
6019 #ifdef CONFIG_2BUFF_MODE
6020 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6022 switch(sp->intr_type) {
6024 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6027 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6030 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6034 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6035 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6036 sp->def_mac_addr[0].mac_addr[0],
6037 sp->def_mac_addr[0].mac_addr[1],
6038 sp->def_mac_addr[0].mac_addr[2],
6039 sp->def_mac_addr[0].mac_addr[3],
6040 sp->def_mac_addr[0].mac_addr[4],
6041 sp->def_mac_addr[0].mac_addr[5]);
6042 mode = s2io_print_pci_mode(sp);
6044 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
6046 goto set_swap_failed;
6049 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
6051 DBG_PRINT(ERR_DBG, "(rev %d), %s",
6052 get_xena_rev_id(sp->pdev),
6053 s2io_driver_version);
6054 #ifdef CONFIG_2BUFF_MODE
6055 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6057 switch(sp->intr_type) {
6059 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6062 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6065 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6068 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6069 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6070 sp->def_mac_addr[0].mac_addr[0],
6071 sp->def_mac_addr[0].mac_addr[1],
6072 sp->def_mac_addr[0].mac_addr[2],
6073 sp->def_mac_addr[0].mac_addr[3],
6074 sp->def_mac_addr[0].mac_addr[4],
6075 sp->def_mac_addr[0].mac_addr[5]);
6078 /* Initialize device name */
6079 strcpy(sp->name, dev->name);
6080 if (sp->device_type & XFRAME_II_DEVICE)
6081 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
6083 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
6085 /* Initialize bimodal Interrupts */
6086 sp->config.bimodal = bimodal;
6087 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
6088 sp->config.bimodal = 0;
6089 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
6094 * Make Link state as off at this point, when the Link change
6095 * interrupt comes the state will be automatically changed to
6098 netif_carrier_off(dev);
6109 free_shared_mem(sp);
6110 pci_disable_device(pdev);
6111 if (dev_intr_type != MSI_X)
6112 pci_release_regions(pdev);
6114 release_mem_region(pci_resource_start(pdev, 0),
6115 pci_resource_len(pdev, 0));
6116 release_mem_region(pci_resource_start(pdev, 2),
6117 pci_resource_len(pdev, 2));
6119 pci_set_drvdata(pdev, NULL);
6126 * s2io_rem_nic - Free the PCI device
6127 * @pdev: structure containing the PCI related information of the device.
6128 * Description: This function is called by the PCI subsystem to release a
6129 * PCI device and free up all resources held by the device. This could
6130 * be in response to a Hot plug event or when the driver is to be removed
6130 * be in response to a Hot plug event or when the driver is to be removed
/*
 * s2io_rem_nic - PCI remove callback: tear down one adapter instance.
 * Unregisters the net_device, releases shared descriptor memory, disables
 * the PCI device and gives back its regions/BARs, then clears the driver
 * data pointer so the pci_dev no longer references the freed device.
 *
 * NOTE(review): this extract is missing lines (the embedded numbering
 * jumps, e.g. 6137 -> 6141); the opening brace, the declaration of `sp`,
 * the `dev == NULL` early-exit path, iounmap/free_irq calls and the
 * closing brace were dropped by the extraction — verify against the
 * full source before editing.
 */
6134 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
6136 struct net_device *dev =
6137 (struct net_device *) pci_get_drvdata(pdev);
/* Guard path: probe never completed / drvdata already cleared. */
6141 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Detach from the network stack before freeing any backing memory. */
6146 unregister_netdev(dev);
/* Free Tx/Rx descriptor rings and buffers allocated at probe time. */
6148 free_shared_mem(sp);
6151 pci_disable_device(pdev);
/* MSI-X probes acquired BARs with request_mem_region, not
 * pci_request_regions — release with the matching call below. */
6152 if (sp->intr_type != MSI_X)
6153 pci_release_regions(pdev);
6155 release_mem_region(pci_resource_start(pdev, 0),
6156 pci_resource_len(pdev, 0));
6157 release_mem_region(pci_resource_start(pdev, 2),
6158 pci_resource_len(pdev, 2));
/* Drop the back-pointer so nothing can reach the freed net_device. */
6160 pci_set_drvdata(pdev, NULL);
6165 * s2io_starter - Entry point for the driver
6166 * Description: This function is the entry point for the driver. It verifies
6167 * the module loadable parameters and initializes PCI configuration space.
6170 int __init s2io_starter(void)
6172 return pci_module_init(&s2io_driver);
6176 * s2io_closer - Cleanup routine for the driver
6177 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
/*
 * s2io_closer - Module exit point.
 * Unregisters the PCI driver (which triggers s2io_rem_nic for every
 * bound device) and logs completion at init-debug level.
 *
 * NOTE(review): the function's braces were dropped by the extraction;
 * the two statements below form its entire body in the full source.
 */
6180 void s2io_closer(void)
6182 pci_unregister_driver(&s2io_driver);
6183 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Hook the entry/exit routines into the module load/unload machinery. */
6186 module_init(s2io_starter);
6187 module_exit(s2io_closer);