1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code parts that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
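/*
 * Illustrative only (not part of the original source): the variables
 * described above are exposed as ordinary module parameters, so a
 * hypothetical load with two Tx FIFOs and two Rx rings might look like
 *
 *   modprobe s2io tx_fifo_num=2 rx_ring_num=2
 *
 * with tx_fifo_len and rx_ring_sz optionally supplied as comma-separated
 * arrays of up to 8 entries each.
 */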
37
38#include <linux/config.h>
39#include <linux/module.h>
40#include <linux/types.h>
41#include <linux/errno.h>
42#include <linux/ioport.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/kernel.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/skbuff.h>
49#include <linux/init.h>
50#include <linux/delay.h>
51#include <linux/stddef.h>
52#include <linux/ioctl.h>
53#include <linux/timex.h>
54#include <linux/sched.h>
55#include <linux/ethtool.h>
56#include <linux/version.h>
57#include <linux/workqueue.h>
58#include <linux/if_vlan.h>
59
60#include <asm/system.h>
61#include <asm/uaccess.h>
62#include <asm/io.h>
63
64/* local include */
65#include "s2io.h"
66#include "s2io-regs.h"
67
68/* S2io Driver name & version. */
69static char s2io_driver_name[] = "Neterion";
70static char s2io_driver_version[] = "Version 2.0.3.1";
71
72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{
74 int ret;
75
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79 return ret;
80}
81
82/*
83 * Cards with following subsystem_id have a link state indication
84 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
85 * macro below identifies these cards given the subsystem_id.
86 */
87#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88 (dev_type == XFRAME_I_DEVICE) ? \
89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
91
92#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95#define PANIC 1
96#define LOW 2
97static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98{
99 int level = 0;
100 mac_info_t *mac_control;
101
102 mac_control = &sp->mac_control;
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
104 level = LOW;
105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
106 level = PANIC;
107 }
108 }
109
110 return level;
111}
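/*
 * Worked example (illustrative, not from the original source): if a ring
 * was set up with pkt_cnt = 4096 and only rxb_size = 200 RxDs are still
 * posted, then (4096 - 200) > 16, so the level reported is LOW; once
 * rxb_size falls to MAX_RXDS_PER_BLOCK or below, the level escalates to
 * PANIC and the ring is presumed to need immediate replenishing.
 */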
112
113/* Ethtool related variables and Macros. */
114static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
120};
121
122static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123 {"tmac_frms"},
124 {"tmac_data_octets"},
125 {"tmac_drop_frms"},
126 {"tmac_mcst_frms"},
127 {"tmac_bcst_frms"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
131 {"tmac_vld_ip"},
132 {"tmac_drop_ip"},
133 {"tmac_icmp"},
134 {"tmac_rst_tcp"},
135 {"tmac_tcp"},
136 {"tmac_udp"},
137 {"rmac_vld_frms"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
140 {"rmac_drop_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
144 {"rmac_long_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
149 {"rmac_frag_frms"},
150 {"rmac_jabber_frms"},
151 {"rmac_ip"},
152 {"rmac_ip_octets"},
153 {"rmac_hdr_err_ip"},
154 {"rmac_drop_ip"},
155 {"rmac_icmp"},
156 {"rmac_tcp"},
157 {"rmac_udp"},
158 {"rmac_err_drp_udp"},
159 {"rmac_pause_cnt"},
160 {"rmac_accepted_ip"},
161 {"rmac_err_tcp"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
165};
166
167#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
168#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
169
170#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
171#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
172
173#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
174 init_timer(&timer); \
175 timer.function = handle; \
176 timer.data = (unsigned long) arg; \
177 mod_timer(&timer, (jiffies + exp)) \
178
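/*
 * Illustrative use of the macro above (the handler name and period are
 * hypothetical, not taken from this file):
 *
 *   S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 *
 * which initializes the timer, points it at the handler with the nic
 * pointer as its argument, and arms it half a second from now.
 */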
179/* Add the vlan */
180static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
182{
183 nic_t *nic = dev->priv;
184 unsigned long flags;
185
186 spin_lock_irqsave(&nic->tx_lock, flags);
187 nic->vlgrp = grp;
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
189}
190
191/* Unregister the vlan */
192static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193{
194 nic_t *nic = dev->priv;
195 unsigned long flags;
196
197 spin_lock_irqsave(&nic->tx_lock, flags);
198 if (nic->vlgrp)
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
201}
202
203/*
204 * Constants to be programmed into the Xena's registers, to configure
205 * the XAUI.
206 */
207
208#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
209#define END_SIGN 0x0
210
211static u64 herc_act_dtx_cfg[] = {
212 /* Set address */
213 0x8000051536750000ULL, 0x80000515367500E0ULL,
214 /* Write data */
215 0x8000051536750004ULL, 0x80000515367500E4ULL,
216 /* Set address */
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218 /* Write data */
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220 /* Set address */
221 0x801205150D440000ULL, 0x801205150D4400E0ULL,
222 /* Write data */
223 0x801205150D440004ULL, 0x801205150D4400E4ULL,
224 /* Set address */
225 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
226 /* Write data */
227 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
228 /* Done */
229 END_SIGN
230};
231
232static u64 xena_mdio_cfg[] = {
233 /* Reset PMA PLL */
234 0xC001010000000000ULL, 0xC0010100000000E0ULL,
235 0xC0010100008000E4ULL,
236 /* Remove Reset from PMA PLL */
237 0xC001010000000000ULL, 0xC0010100000000E0ULL,
238 0xC0010100000000E4ULL,
239 END_SIGN
240};
241
242static u64 xena_dtx_cfg[] = {
243 0x8000051500000000ULL, 0x80000515000000E0ULL,
244 0x80000515D93500E4ULL, 0x8001051500000000ULL,
245 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
246 0x8002051500000000ULL, 0x80020515000000E0ULL,
247 0x80020515F21000E4ULL,
248 /* Set PADLOOPBACKN */
249 0x8002051500000000ULL, 0x80020515000000E0ULL,
250 0x80020515B20000E4ULL, 0x8003051500000000ULL,
251 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
252 0x8004051500000000ULL, 0x80040515000000E0ULL,
253 0x80040515B20000E4ULL, 0x8005051500000000ULL,
254 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
255 SWITCH_SIGN,
256 /* Remove PADLOOPBACKN */
257 0x8002051500000000ULL, 0x80020515000000E0ULL,
258 0x80020515F20000E4ULL, 0x8003051500000000ULL,
259 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
260 0x8004051500000000ULL, 0x80040515000000E0ULL,
261 0x80040515F20000E4ULL, 0x8005051500000000ULL,
262 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
263 END_SIGN
264};
265
266/*
267 * Constants for Fixing the MacAddress problem seen mostly on
268 * Alpha machines.
269 */
270static u64 fix_mac[] = {
271 0x0060000000000000ULL, 0x0060600000000000ULL,
272 0x0040600000000000ULL, 0x0000600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0060600000000000ULL,
280 0x0020600000000000ULL, 0x0060600000000000ULL,
281 0x0020600000000000ULL, 0x0060600000000000ULL,
282 0x0020600000000000ULL, 0x0060600000000000ULL,
283 0x0020600000000000ULL, 0x0000600000000000ULL,
284 0x0040600000000000ULL, 0x0060600000000000ULL,
285 END_SIGN
286};
287
288/* Module Loadable parameters. */
289static unsigned int tx_fifo_num = 1;
290static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
291 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
292static unsigned int rx_ring_num = 1;
293static unsigned int rx_ring_sz[MAX_RX_RINGS] =
294 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
295static unsigned int rts_frm_len[MAX_RX_RINGS] =
296 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297static unsigned int use_continuous_tx_intrs = 1;
298static unsigned int rmac_pause_time = 65535;
299static unsigned int mc_pause_threshold_q0q3 = 187;
300static unsigned int mc_pause_threshold_q4q7 = 187;
301static unsigned int shared_splits;
302static unsigned int tmac_util_period = 5;
303static unsigned int rmac_util_period = 5;
304static unsigned int bimodal = 0;
305#ifndef CONFIG_S2IO_NAPI
306static unsigned int indicate_max_pkts;
307#endif
308/* Frequency of Rx desc syncs expressed as power of 2 */
309static unsigned int rxsync_frequency = 3;
310
311/*
1da177e4 312 * S2IO device table.
313 * This table lists all the devices that this driver supports.
314 */
315static struct pci_device_id s2io_tbl[] __devinitdata = {
316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
317 PCI_ANY_ID, PCI_ANY_ID},
318 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
319 PCI_ANY_ID, PCI_ANY_ID},
320 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
321 PCI_ANY_ID, PCI_ANY_ID},
322 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
323 PCI_ANY_ID, PCI_ANY_ID},
324 {0,}
325};
326
327MODULE_DEVICE_TABLE(pci, s2io_tbl);
328
329static struct pci_driver s2io_driver = {
330 .name = "S2IO",
331 .id_table = s2io_tbl,
332 .probe = s2io_init_nic,
333 .remove = __devexit_p(s2io_rem_nic),
334};
335
336/* A simplifier macro used both by init and free shared_mem Fns(). */
337#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
338
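/*
 * Illustrative arithmetic for the macro above (numbers are made up):
 * if lst_per_page worked out to 16 TxD lists per page, a FIFO of 500
 * descriptors would need TXD_MEM_PAGE_CNT(500, 16) = (500 + 15) / 16
 * = 32 pages, i.e. the division rounds up so no list is split across
 * a page boundary.
 */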
339/**
340 * init_shared_mem - Allocation and Initialization of Memory
341 * @nic: Device private variable.
342 * Description: The function allocates all the memory areas shared
343 * between the NIC and the driver. This includes Tx descriptors,
344 * Rx descriptors and the statistics block.
345 */
346
347static int init_shared_mem(struct s2io_nic *nic)
348{
349 u32 size;
350 void *tmp_v_addr, *tmp_v_addr_next;
351 dma_addr_t tmp_p_addr, tmp_p_addr_next;
352 RxD_block_t *pre_rxd_blk = NULL;
353 int i, j, blk_cnt, rx_sz, tx_sz;
354 int lst_size, lst_per_page;
355 struct net_device *dev = nic->dev;
356#ifdef CONFIG_2BUFF_MODE
357 u64 tmp;
358 buffAdd_t *ba;
359#endif
360
361 mac_info_t *mac_control;
362 struct config_param *config;
363
364 mac_control = &nic->mac_control;
365 config = &nic->config;
366
367
368 /* Allocation and initialization of TXDLs in FIFOs */
369 size = 0;
370 for (i = 0; i < config->tx_fifo_num; i++) {
371 size += config->tx_cfg[i].fifo_len;
372 }
373 if (size > MAX_AVAILABLE_TXDS) {
374 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
375 __FUNCTION__);
376 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
377 return FAILURE;
378 }
379
380 lst_size = (sizeof(TxD_t) * config->max_txds);
381 tx_sz = lst_size * size;
382 lst_per_page = PAGE_SIZE / lst_size;
383
384 for (i = 0; i < config->tx_fifo_num; i++) {
385 int fifo_len = config->tx_cfg[i].fifo_len;
386 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
387 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
388 GFP_KERNEL);
389 if (!mac_control->fifos[i].list_info) {
390 DBG_PRINT(ERR_DBG,
391 "Malloc failed for list_info\n");
392 return -ENOMEM;
393 }
394 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
395 }
396 for (i = 0; i < config->tx_fifo_num; i++) {
397 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
398 lst_per_page);
399 mac_control->fifos[i].tx_curr_put_info.offset = 0;
400 mac_control->fifos[i].tx_curr_put_info.fifo_len =
401 config->tx_cfg[i].fifo_len - 1;
402 mac_control->fifos[i].tx_curr_get_info.offset = 0;
403 mac_control->fifos[i].tx_curr_get_info.fifo_len =
404 config->tx_cfg[i].fifo_len - 1;
405 mac_control->fifos[i].fifo_no = i;
406 mac_control->fifos[i].nic = nic;
407 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
408
409 for (j = 0; j < page_num; j++) {
410 int k = 0;
411 dma_addr_t tmp_p;
412 void *tmp_v;
413 tmp_v = pci_alloc_consistent(nic->pdev,
414 PAGE_SIZE, &tmp_p);
415 if (!tmp_v) {
416 DBG_PRINT(ERR_DBG,
417 "pci_alloc_consistent ");
418 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
419 return -ENOMEM;
420 }
421 while (k < lst_per_page) {
422 int l = (j * lst_per_page) + k;
423 if (l == config->tx_cfg[i].fifo_len)
424 break;
425 mac_control->fifos[i].list_info[l].list_virt_addr =
426 tmp_v + (k * lst_size);
427 mac_control->fifos[i].list_info[l].list_phy_addr =
428 tmp_p + (k * lst_size);
429 k++;
430 }
431 }
432 }
433
434 /* Allocation and initialization of RXDs in Rings */
435 size = 0;
436 for (i = 0; i < config->rx_ring_num; i++) {
437 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
438 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
439 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
440 i);
441 DBG_PRINT(ERR_DBG, "RxDs per Block");
442 return FAILURE;
443 }
444 size += config->rx_cfg[i].num_rxd;
445 mac_control->rings[i].block_count =
446 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
447 mac_control->rings[i].pkt_cnt =
448 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
449 }
450 size = (size * (sizeof(RxD_t)));
451 rx_sz = size;
452
453 for (i = 0; i < config->rx_ring_num; i++) {
454 mac_control->rings[i].rx_curr_get_info.block_index = 0;
455 mac_control->rings[i].rx_curr_get_info.offset = 0;
456 mac_control->rings[i].rx_curr_get_info.ring_len =
457 config->rx_cfg[i].num_rxd - 1;
458 mac_control->rings[i].rx_curr_put_info.block_index = 0;
459 mac_control->rings[i].rx_curr_put_info.offset = 0;
460 mac_control->rings[i].rx_curr_put_info.ring_len =
461 config->rx_cfg[i].num_rxd - 1;
462 mac_control->rings[i].nic = nic;
463 mac_control->rings[i].ring_no = i;
464
465 blk_cnt =
466 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
467 /* Allocating all the Rx blocks */
468 for (j = 0; j < blk_cnt; j++) {
469#ifndef CONFIG_2BUFF_MODE
470 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
471#else
472 size = SIZE_OF_BLOCK;
473#endif
474 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
475 &tmp_p_addr);
476 if (tmp_v_addr == NULL) {
477 /*
478 * In case of failure, free_shared_mem()
479 * is called, which should free any
480 * memory that was alloced till the
481 * failure happened.
482 */
483 mac_control->rings[i].rx_blocks[j].block_virt_addr =
484 tmp_v_addr;
485 return -ENOMEM;
486 }
487 memset(tmp_v_addr, 0, size);
488 mac_control->rings[i].rx_blocks[j].block_virt_addr =
489 tmp_v_addr;
490 mac_control->rings[i].rx_blocks[j].block_dma_addr =
491 tmp_p_addr;
492 }
493 /* Interlinking all Rx Blocks */
494 for (j = 0; j < blk_cnt; j++) {
495 tmp_v_addr =
496 mac_control->rings[i].rx_blocks[j].block_virt_addr;
497 tmp_v_addr_next =
498 mac_control->rings[i].rx_blocks[(j + 1) %
499 blk_cnt].block_virt_addr;
500 tmp_p_addr =
501 mac_control->rings[i].rx_blocks[j].block_dma_addr;
502 tmp_p_addr_next =
503 mac_control->rings[i].rx_blocks[(j + 1) %
504 blk_cnt].block_dma_addr;
505
506 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
507 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
508 * marker.
509 */
510#ifndef CONFIG_2BUFF_MODE
511 pre_rxd_blk->reserved_2_pNext_RxD_block =
512 (unsigned long) tmp_v_addr_next;
513#endif
514 pre_rxd_blk->pNext_RxD_Blk_physical =
515 (u64) tmp_p_addr_next;
516 }
517 }
518
519#ifdef CONFIG_2BUFF_MODE
520 /*
521 * Allocation of Storages for buffer addresses in 2BUFF mode
522 * and the buffers as well.
523 */
524 for (i = 0; i < config->rx_ring_num; i++) {
525 blk_cnt =
526 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
527 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
528 GFP_KERNEL);
529 if (!mac_control->rings[i].ba)
530 return -ENOMEM;
531 for (j = 0; j < blk_cnt; j++) {
532 int k = 0;
533 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
534 (MAX_RXDS_PER_BLOCK + 1)),
535 GFP_KERNEL);
536 if (!mac_control->rings[i].ba[j])
537 return -ENOMEM;
538 while (k != MAX_RXDS_PER_BLOCK) {
539 ba = &mac_control->rings[i].ba[j][k];
540
541 ba->ba_0_org = (void *) kmalloc
542 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
543 if (!ba->ba_0_org)
544 return -ENOMEM;
545 tmp = (u64) ba->ba_0_org;
546 tmp += ALIGN_SIZE;
547 tmp &= ~((u64) ALIGN_SIZE);
548 ba->ba_0 = (void *) tmp;
549
550 ba->ba_1_org = (void *) kmalloc
551 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
552 if (!ba->ba_1_org)
553 return -ENOMEM;
554 tmp = (u64) ba->ba_1_org;
555 tmp += ALIGN_SIZE;
556 tmp &= ~((u64) ALIGN_SIZE);
557 ba->ba_1 = (void *) tmp;
558 k++;
559 }
560 }
561 }
562#endif
563
564 /* Allocation and initialization of Statistics block */
565 size = sizeof(StatInfo_t);
566 mac_control->stats_mem = pci_alloc_consistent
567 (nic->pdev, size, &mac_control->stats_mem_phy);
568
569 if (!mac_control->stats_mem) {
570 /*
571 * In case of failure, free_shared_mem() is called, which
572 * should free any memory that was alloced till the
573 * failure happened.
574 */
575 return -ENOMEM;
576 }
577 mac_control->stats_mem_sz = size;
578
579 tmp_v_addr = mac_control->stats_mem;
580 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
581 memset(tmp_v_addr, 0, size);
582 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
583 (unsigned long long) tmp_p_addr);
584
585 return SUCCESS;
586}
587
588/**
589 * free_shared_mem - Free the allocated Memory
590 * @nic: Device private variable.
591 * Description: This function is to free all memory locations allocated by
592 * the init_shared_mem() function and return it to the kernel.
593 */
594
595static void free_shared_mem(struct s2io_nic *nic)
596{
597 int i, j, blk_cnt, size;
598 void *tmp_v_addr;
599 dma_addr_t tmp_p_addr;
600 mac_info_t *mac_control;
601 struct config_param *config;
602 int lst_size, lst_per_page;
603
604
605 if (!nic)
606 return;
607
608 mac_control = &nic->mac_control;
609 config = &nic->config;
610
611 lst_size = (sizeof(TxD_t) * config->max_txds);
612 lst_per_page = PAGE_SIZE / lst_size;
613
614 for (i = 0; i < config->tx_fifo_num; i++) {
615 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
616 lst_per_page);
617 for (j = 0; j < page_num; j++) {
618 int mem_blks = (j * lst_per_page);
619 if ((!mac_control->fifos[i].list_info) ||
620 (!mac_control->fifos[i].list_info[mem_blks].
621 list_virt_addr))
622 break;
623 pci_free_consistent(nic->pdev, PAGE_SIZE,
624 mac_control->fifos[i].
625 list_info[mem_blks].
626 list_virt_addr,
627 mac_control->fifos[i].
628 list_info[mem_blks].
629 list_phy_addr);
630 }
631 kfree(mac_control->fifos[i].list_info);
632 }
633
634#ifndef CONFIG_2BUFF_MODE
635 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
636#else
637 size = SIZE_OF_BLOCK;
638#endif
639 for (i = 0; i < config->rx_ring_num; i++) {
640 blk_cnt = mac_control->rings[i].block_count;
641 for (j = 0; j < blk_cnt; j++) {
642 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
643 block_virt_addr;
644 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
645 block_dma_addr;
646 if (tmp_v_addr == NULL)
647 break;
648 pci_free_consistent(nic->pdev, size,
649 tmp_v_addr, tmp_p_addr);
650 }
651 }
652
653#ifdef CONFIG_2BUFF_MODE
654 /* Freeing buffer storage addresses in 2BUFF mode. */
655 for (i = 0; i < config->rx_ring_num; i++) {
656 blk_cnt =
657 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
658 for (j = 0; j < blk_cnt; j++) {
659 int k = 0;
660 if (!mac_control->rings[i].ba[j])
661 continue;
662 while (k != MAX_RXDS_PER_BLOCK) {
663 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
664 kfree(ba->ba_0_org);
665 kfree(ba->ba_1_org);
666 k++;
667 }
668 kfree(mac_control->rings[i].ba[j]);
669 }
670 if (mac_control->rings[i].ba)
671 kfree(mac_control->rings[i].ba);
672 }
673#endif
674
675 if (mac_control->stats_mem) {
676 pci_free_consistent(nic->pdev,
677 mac_control->stats_mem_sz,
678 mac_control->stats_mem,
679 mac_control->stats_mem_phy);
680 }
681}
682
683/**
684 * s2io_verify_pci_mode - Determine which PCI/PCI-X mode the adapter is in
685 */
686
687static int s2io_verify_pci_mode(nic_t *nic)
688{
689 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
690 register u64 val64 = 0;
691 int mode;
692
693 val64 = readq(&bar0->pci_mode);
694 mode = (u8)GET_PCI_MODE(val64);
695
696 if ( val64 & PCI_MODE_UNKNOWN_MODE)
697 return -1; /* Unknown PCI mode */
698 return mode;
699}
700
701
702/**
703 * s2io_print_pci_mode - Print the bus width and speed the adapter is using
704 */
705static int s2io_print_pci_mode(nic_t *nic)
706{
707 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
708 register u64 val64 = 0;
709 int mode;
710 struct config_param *config = &nic->config;
711
712 val64 = readq(&bar0->pci_mode);
713 mode = (u8)GET_PCI_MODE(val64);
714
715 if ( val64 & PCI_MODE_UNKNOWN_MODE)
716 return -1; /* Unknown PCI mode */
717
718 if (val64 & PCI_MODE_32_BITS) {
719 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
720 } else {
721 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
722 }
723
724 switch(mode) {
725 case PCI_MODE_PCI_33:
726 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
727 config->bus_speed = 33;
728 break;
729 case PCI_MODE_PCI_66:
730 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
731 config->bus_speed = 133;
732 break;
733 case PCI_MODE_PCIX_M1_66:
734 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
735 config->bus_speed = 133; /* Herc doubles the clock rate */
736 break;
737 case PCI_MODE_PCIX_M1_100:
738 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
739 config->bus_speed = 200;
740 break;
741 case PCI_MODE_PCIX_M1_133:
742 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
743 config->bus_speed = 266;
744 break;
745 case PCI_MODE_PCIX_M2_66:
746 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
747 config->bus_speed = 133;
748 break;
749 case PCI_MODE_PCIX_M2_100:
750 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
751 config->bus_speed = 200;
752 break;
753 case PCI_MODE_PCIX_M2_133:
754 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
755 config->bus_speed = 266;
756 break;
757 default:
758 return -1; /* Unsupported bus speed */
759 }
760
761 return mode;
762}
763
764/**
765 * init_nic - Initialization of hardware
766 * @nic: device private variable
767 * Description: The function sequentially configures every block
768 * of the H/W from their reset values.
769 * Return Value: SUCCESS on success and
770 * '-1' on failure (endian settings incorrect).
771 */
772
773static int init_nic(struct s2io_nic *nic)
774{
775 XENA_dev_config_t __iomem *bar0 = nic->bar0;
776 struct net_device *dev = nic->dev;
777 register u64 val64 = 0;
778 void __iomem *add;
779 u32 time;
780 int i, j;
781 mac_info_t *mac_control;
782 struct config_param *config;
783 int mdio_cnt = 0, dtx_cnt = 0;
784 unsigned long long mem_share;
785 int mem_size;
786
787 mac_control = &nic->mac_control;
788 config = &nic->config;
789
790 /* Set the swapper control on the card */
791 if (s2io_set_swapper(nic)) {
792 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
793 return -1;
794 }
795
796 /*
797 * Herc requires EOI to be removed from reset before XGXS, so..
798 */
799 if (nic->device_type & XFRAME_II_DEVICE) {
800 val64 = 0xA500000000ULL;
801 writeq(val64, &bar0->sw_reset);
802 msleep(500);
803 val64 = readq(&bar0->sw_reset);
804 }
805
806 /* Remove XGXS from reset state */
807 val64 = 0;
808 writeq(val64, &bar0->sw_reset);
809 msleep(500);
810 val64 = readq(&bar0->sw_reset);
811
812 /* Enable Receiving broadcasts */
813 add = &bar0->mac_cfg;
814 val64 = readq(&bar0->mac_cfg);
815 val64 |= MAC_RMAC_BCAST_ENABLE;
816 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
817 writel((u32) val64, add);
818 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
819 writel((u32) (val64 >> 32), (add + 4));
820
821 /* Read registers in all blocks */
822 val64 = readq(&bar0->mac_int_mask);
823 val64 = readq(&bar0->mc_int_mask);
824 val64 = readq(&bar0->xgxs_int_mask);
825
826 /* Set MTU */
827 val64 = dev->mtu;
828 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
829
830 /*
831 * Configuring the XAUI Interface of Xena.
1da177e4 832 * ***************************************
833 * To Configure the Xena's XAUI, one has to write a series
834 * of 64 bit values into two registers in a particular
835 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
836 * which will be defined in the array of configuration values
837 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
838 * to switch writing from one register to another. We continue
839 * writing these values until we encounter the 'END_SIGN' macro.
840 * For example, After making a series of 21 writes into
841 * dtx_control register the 'SWITCH_SIGN' appears and hence we
842 * start writing into mdio_control until we encounter END_SIGN.
843 */
844 if (nic->device_type & XFRAME_II_DEVICE) {
845 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
846 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
847 &bar0->dtx_control, UF);
848 if (dtx_cnt & 0x1)
849 msleep(1); /* Necessary!! */
850 dtx_cnt++;
851 }
852 } else {
853 while (1) {
854 dtx_cfg:
855 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
856 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
857 dtx_cnt++;
858 goto mdio_cfg;
859 }
860 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
861 &bar0->dtx_control, UF);
862 val64 = readq(&bar0->dtx_control);
863 dtx_cnt++;
864 }
865 mdio_cfg:
866 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
867 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
868 mdio_cnt++;
869 goto dtx_cfg;
870 }
871 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
872 &bar0->mdio_control, UF);
873 val64 = readq(&bar0->mdio_control);
874 mdio_cnt++;
875 }
876 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
877 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
878 break;
879 } else {
880 goto dtx_cfg;
881 }
882 }
883 }
884
885 /* Tx DMA Initialization */
886 val64 = 0;
887 writeq(val64, &bar0->tx_fifo_partition_0);
888 writeq(val64, &bar0->tx_fifo_partition_1);
889 writeq(val64, &bar0->tx_fifo_partition_2);
890 writeq(val64, &bar0->tx_fifo_partition_3);
891
892
893 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
894 val64 |=
895 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
896 13) | vBIT(config->tx_cfg[i].fifo_priority,
897 ((i * 32) + 5), 3);
898
899 if (i == (config->tx_fifo_num - 1)) {
900 if (i % 2 == 0)
901 i++;
902 }
903
904 switch (i) {
905 case 1:
906 writeq(val64, &bar0->tx_fifo_partition_0);
907 val64 = 0;
908 break;
909 case 3:
910 writeq(val64, &bar0->tx_fifo_partition_1);
911 val64 = 0;
912 break;
913 case 5:
914 writeq(val64, &bar0->tx_fifo_partition_2);
915 val64 = 0;
916 break;
917 case 7:
918 writeq(val64, &bar0->tx_fifo_partition_3);
919 break;
920 }
921 }
922
923 /* Enable Tx FIFO partition 0. */
924 val64 = readq(&bar0->tx_fifo_partition_0);
925 val64 |= BIT(0); /* To enable the FIFO partition. */
926 writeq(val64, &bar0->tx_fifo_partition_0);
927
928 /*
929 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
930 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
931 */
932 if ((nic->device_type == XFRAME_I_DEVICE) &&
933 (get_xena_rev_id(nic->pdev) < 4))
934 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
935
936 val64 = readq(&bar0->tx_fifo_partition_0);
937 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
938 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
939
940 /*
941 * Initialization of Tx_PA_CONFIG register to ignore packet
942 * integrity checking.
943 */
944 val64 = readq(&bar0->tx_pa_cfg);
945 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
946 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
947 writeq(val64, &bar0->tx_pa_cfg);
948
949 /* Rx DMA initialization. */
950 val64 = 0;
951 for (i = 0; i < config->rx_ring_num; i++) {
952 val64 |=
953 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
954 3);
955 }
956 writeq(val64, &bar0->rx_queue_priority);
957
958 /*
959 * Allocating equal share of memory to all the
960 * configured Rings.
961 */
962 val64 = 0;
963 if (nic->device_type & XFRAME_II_DEVICE)
964 mem_size = 32;
965 else
966 mem_size = 64;
967
968 for (i = 0; i < config->rx_ring_num; i++) {
969 switch (i) {
970 case 0:
971 mem_share = (mem_size / config->rx_ring_num +
972 mem_size % config->rx_ring_num);
973 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
974 continue;
975 case 1:
976 mem_share = (mem_size / config->rx_ring_num);
977 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
978 continue;
979 case 2:
980 mem_share = (mem_size / config->rx_ring_num);
981 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
982 continue;
983 case 3:
984 mem_share = (mem_size / config->rx_ring_num);
985 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
986 continue;
987 case 4:
988 mem_share = (mem_size / config->rx_ring_num);
989 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
990 continue;
991 case 5:
992 mem_share = (mem_size / config->rx_ring_num);
993 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
994 continue;
995 case 6:
996 mem_share = (mem_size / config->rx_ring_num);
997 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
998 continue;
999 case 7:
1000 mem_share = (mem_size / config->rx_ring_num);
1001 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1002 continue;
1003 }
1004 }
1005 writeq(val64, &bar0->rx_queue_cfg);
1006
1007 /*
1008 * Filling Tx round robin registers
1009 * as per the number of FIFOs
1010 */
1011 switch (config->tx_fifo_num) {
1012 case 1:
1013 val64 = 0x0000000000000000ULL;
1014 writeq(val64, &bar0->tx_w_round_robin_0);
1015 writeq(val64, &bar0->tx_w_round_robin_1);
1016 writeq(val64, &bar0->tx_w_round_robin_2);
1017 writeq(val64, &bar0->tx_w_round_robin_3);
1018 writeq(val64, &bar0->tx_w_round_robin_4);
1019 break;
1020 case 2:
1021 val64 = 0x0000010000010000ULL;
1022 writeq(val64, &bar0->tx_w_round_robin_0);
1023 val64 = 0x0100000100000100ULL;
1024 writeq(val64, &bar0->tx_w_round_robin_1);
1025 val64 = 0x0001000001000001ULL;
1026 writeq(val64, &bar0->tx_w_round_robin_2);
1027 val64 = 0x0000010000010000ULL;
1028 writeq(val64, &bar0->tx_w_round_robin_3);
1029 val64 = 0x0100000000000000ULL;
1030 writeq(val64, &bar0->tx_w_round_robin_4);
1031 break;
1032 case 3:
1033 val64 = 0x0001000102000001ULL;
1034 writeq(val64, &bar0->tx_w_round_robin_0);
1035 val64 = 0x0001020000010001ULL;
1036 writeq(val64, &bar0->tx_w_round_robin_1);
1037 val64 = 0x0200000100010200ULL;
1038 writeq(val64, &bar0->tx_w_round_robin_2);
1039 val64 = 0x0001000102000001ULL;
1040 writeq(val64, &bar0->tx_w_round_robin_3);
1041 val64 = 0x0001020000000000ULL;
1042 writeq(val64, &bar0->tx_w_round_robin_4);
1043 break;
1044 case 4:
1045 val64 = 0x0001020300010200ULL;
1046 writeq(val64, &bar0->tx_w_round_robin_0);
1047 val64 = 0x0100000102030001ULL;
1048 writeq(val64, &bar0->tx_w_round_robin_1);
1049 val64 = 0x0200010000010203ULL;
1050 writeq(val64, &bar0->tx_w_round_robin_2);
1051 val64 = 0x0001020001000001ULL;
1052 writeq(val64, &bar0->tx_w_round_robin_3);
1053 val64 = 0x0203000100000000ULL;
1054 writeq(val64, &bar0->tx_w_round_robin_4);
1055 break;
1056 case 5:
1057 val64 = 0x0001000203000102ULL;
1058 writeq(val64, &bar0->tx_w_round_robin_0);
1059 val64 = 0x0001020001030004ULL;
1060 writeq(val64, &bar0->tx_w_round_robin_1);
1061 val64 = 0x0001000203000102ULL;
1062 writeq(val64, &bar0->tx_w_round_robin_2);
1063 val64 = 0x0001020001030004ULL;
1064 writeq(val64, &bar0->tx_w_round_robin_3);
1065 val64 = 0x0001000000000000ULL;
1066 writeq(val64, &bar0->tx_w_round_robin_4);
1067 break;
1068 case 6:
1069 val64 = 0x0001020304000102ULL;
1070 writeq(val64, &bar0->tx_w_round_robin_0);
1071 val64 = 0x0304050001020001ULL;
1072 writeq(val64, &bar0->tx_w_round_robin_1);
1073 val64 = 0x0203000100000102ULL;
1074 writeq(val64, &bar0->tx_w_round_robin_2);
1075 val64 = 0x0304000102030405ULL;
1076 writeq(val64, &bar0->tx_w_round_robin_3);
1077 val64 = 0x0001000200000000ULL;
1078 writeq(val64, &bar0->tx_w_round_robin_4);
1079 break;
1080 case 7:
1081 val64 = 0x0001020001020300ULL;
1082 writeq(val64, &bar0->tx_w_round_robin_0);
1083 val64 = 0x0102030400010203ULL;
1084 writeq(val64, &bar0->tx_w_round_robin_1);
1085 val64 = 0x0405060001020001ULL;
1086 writeq(val64, &bar0->tx_w_round_robin_2);
1087 val64 = 0x0304050000010200ULL;
1088 writeq(val64, &bar0->tx_w_round_robin_3);
1089 val64 = 0x0102030000000000ULL;
1090 writeq(val64, &bar0->tx_w_round_robin_4);
1091 break;
1092 case 8:
1093 val64 = 0x0001020300040105ULL;
1094 writeq(val64, &bar0->tx_w_round_robin_0);
1095 val64 = 0x0200030106000204ULL;
1096 writeq(val64, &bar0->tx_w_round_robin_1);
1097 val64 = 0x0103000502010007ULL;
1098 writeq(val64, &bar0->tx_w_round_robin_2);
1099 val64 = 0x0304010002060500ULL;
1100 writeq(val64, &bar0->tx_w_round_robin_3);
1101 val64 = 0x0103020400000000ULL;
1102 writeq(val64, &bar0->tx_w_round_robin_4);
1103 break;
1104 }
1105
1106 /* Filling the Rx round robin registers as per the
1107 * number of Rings and steering based on QoS.
1108 */
1109 switch (config->rx_ring_num) {
1110 case 1:
1111 val64 = 0x8080808080808080ULL;
1112 writeq(val64, &bar0->rts_qos_steering);
1113 break;
1114 case 2:
1115 val64 = 0x0000010000010000ULL;
1116 writeq(val64, &bar0->rx_w_round_robin_0);
1117 val64 = 0x0100000100000100ULL;
1118 writeq(val64, &bar0->rx_w_round_robin_1);
1119 val64 = 0x0001000001000001ULL;
1120 writeq(val64, &bar0->rx_w_round_robin_2);
1121 val64 = 0x0000010000010000ULL;
1122 writeq(val64, &bar0->rx_w_round_robin_3);
1123 val64 = 0x0100000000000000ULL;
1124 writeq(val64, &bar0->rx_w_round_robin_4);
1125
1126 val64 = 0x8080808040404040ULL;
1127 writeq(val64, &bar0->rts_qos_steering);
1128 break;
1129 case 3:
1130 val64 = 0x0001000102000001ULL;
1131 writeq(val64, &bar0->rx_w_round_robin_0);
1132 val64 = 0x0001020000010001ULL;
1133 writeq(val64, &bar0->rx_w_round_robin_1);
1134 val64 = 0x0200000100010200ULL;
1135 writeq(val64, &bar0->rx_w_round_robin_2);
1136 val64 = 0x0001000102000001ULL;
1137 writeq(val64, &bar0->rx_w_round_robin_3);
1138 val64 = 0x0001020000000000ULL;
1139 writeq(val64, &bar0->rx_w_round_robin_4);
1140
1141 val64 = 0x8080804040402020ULL;
1142 writeq(val64, &bar0->rts_qos_steering);
1143 break;
1144 case 4:
1145 val64 = 0x0001020300010200ULL;
1146 writeq(val64, &bar0->rx_w_round_robin_0);
1147 val64 = 0x0100000102030001ULL;
1148 writeq(val64, &bar0->rx_w_round_robin_1);
1149 val64 = 0x0200010000010203ULL;
1150 writeq(val64, &bar0->rx_w_round_robin_2);
1151 val64 = 0x0001020001000001ULL;
1152 writeq(val64, &bar0->rx_w_round_robin_3);
1153 val64 = 0x0203000100000000ULL;
1154 writeq(val64, &bar0->rx_w_round_robin_4);
1155
1156 val64 = 0x8080404020201010ULL;
1157 writeq(val64, &bar0->rts_qos_steering);
1158 break;
1159 case 5:
1160 val64 = 0x0001000203000102ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_0);
1162 val64 = 0x0001020001030004ULL;
1163 writeq(val64, &bar0->rx_w_round_robin_1);
1164 val64 = 0x0001000203000102ULL;
1165 writeq(val64, &bar0->rx_w_round_robin_2);
1166 val64 = 0x0001020001030004ULL;
1167 writeq(val64, &bar0->rx_w_round_robin_3);
1168 val64 = 0x0001000000000000ULL;
1169 writeq(val64, &bar0->rx_w_round_robin_4);
1170
1171 val64 = 0x8080404020201008ULL;
1172 writeq(val64, &bar0->rts_qos_steering);
1173 break;
1174 case 6:
1175 val64 = 0x0001020304000102ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_0);
1177 val64 = 0x0304050001020001ULL;
1178 writeq(val64, &bar0->rx_w_round_robin_1);
1179 val64 = 0x0203000100000102ULL;
1180 writeq(val64, &bar0->rx_w_round_robin_2);
1181 val64 = 0x0304000102030405ULL;
1182 writeq(val64, &bar0->rx_w_round_robin_3);
1183 val64 = 0x0001000200000000ULL;
1184 writeq(val64, &bar0->rx_w_round_robin_4);
1185
1186 val64 = 0x8080404020100804ULL;
1187 writeq(val64, &bar0->rts_qos_steering);
1188 break;
1189 case 7:
1190 val64 = 0x0001020001020300ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_0);
1192 val64 = 0x0102030400010203ULL;
1193 writeq(val64, &bar0->rx_w_round_robin_1);
1194 val64 = 0x0405060001020001ULL;
1195 writeq(val64, &bar0->rx_w_round_robin_2);
1196 val64 = 0x0304050000010200ULL;
1197 writeq(val64, &bar0->rx_w_round_robin_3);
1198 val64 = 0x0102030000000000ULL;
1199 writeq(val64, &bar0->rx_w_round_robin_4);
1200
1201 val64 = 0x8080402010080402ULL;
1202 writeq(val64, &bar0->rts_qos_steering);
1203 break;
1204 case 8:
1205 val64 = 0x0001020300040105ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_0);
1207 val64 = 0x0200030106000204ULL;
1208 writeq(val64, &bar0->rx_w_round_robin_1);
1209 val64 = 0x0103000502010007ULL;
1210 writeq(val64, &bar0->rx_w_round_robin_2);
1211 val64 = 0x0304010002060500ULL;
1212 writeq(val64, &bar0->rx_w_round_robin_3);
1213 val64 = 0x0103020400000000ULL;
1214 writeq(val64, &bar0->rx_w_round_robin_4);
1215
1216 val64 = 0x8040201008040201ULL;
1217 writeq(val64, &bar0->rts_qos_steering);
1218 break;
1219 }
1220
1221 /* UDP Fix */
1222 val64 = 0;
1223 for (i = 0; i < 8; i++)
1224 writeq(val64, &bar0->rts_frm_len_n[i]);
1225
1226 /* Set the default rts frame length for the rings configured */
1227 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1228 for (i = 0 ; i < config->rx_ring_num ; i++)
1229 writeq(val64, &bar0->rts_frm_len_n[i]);
1230
1231 /* Set the frame length for the configured rings
1232 * desired by the user
1233 */
1234 for (i = 0; i < config->rx_ring_num; i++) {
1235 /* If rts_frm_len[i] == 0 then it is assumed that the user has not
1236 * specified frame length steering.
1237 * If the user provides the frame length then program
1238 * the rts_frm_len register for those values or else
1239 * leave it as it is.
1240 */
1241 if (rts_frm_len[i] != 0) {
1242 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1243 &bar0->rts_frm_len_n[i]);
1244 }
1245 }
1246
1247 /* Program statistics memory */
1248 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1249
1250 if (nic->device_type == XFRAME_II_DEVICE) {
1251 val64 = STAT_BC(0x320);
1252 writeq(val64, &bar0->stat_byte_cnt);
1253 }
1254
1255 /*
1256 * Initializing the sampling rate for the device to calculate the
1257 * bandwidth utilization.
1258 */
1259 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1260 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1261 writeq(val64, &bar0->mac_link_util);
1262
1263
1264 /*
1265 * Initializing the Transmit and Receive Traffic Interrupt
1266 * Scheme.
1267 */
1268 /*
1269 * TTI Initialization. Default Tx timer gets us about
1270 * 250 interrupts per sec. Continuous interrupts are enabled
1271 * by default.
1272 */
1273 if (nic->device_type == XFRAME_II_DEVICE) {
1274 int count = (nic->config.bus_speed * 125)/2;
1275 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1276 } else {
1277
1278 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1279 }
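/*
 * Illustrative only: on a Herc (Xframe-II) card running in PCI-X M2 133
 * mode the bus_speed recorded earlier is 266, so the count above works
 * out to (266 * 125) / 2 = 16625, while the Xframe-I path simply uses
 * the fixed value 0x2078.
 */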
1280 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1281 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1282 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1283 if (use_continuous_tx_intrs)
1284 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1285 writeq(val64, &bar0->tti_data1_mem);
1286
1287 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1288 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1289 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1290 writeq(val64, &bar0->tti_data2_mem);
1291
1292 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1293 writeq(val64, &bar0->tti_command_mem);
1294
1295 /*
1296 * Once the operation completes, the Strobe bit of the command
1297 * register will be reset. We poll for this particular condition
1298 * We wait for a maximum of 500ms for the operation to complete,
1299 * if it's not complete by then we return error.
1300 */
1301 time = 0;
1302 while (TRUE) {
1303 val64 = readq(&bar0->tti_command_mem);
1304 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1305 break;
1306 }
1307 if (time > 10) {
1308 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1309 dev->name);
1310 return -1;
1311 }
1312 msleep(50);
1313 time++;
1314 }
1315
1316 if (nic->config.bimodal) {
1317 int k = 0;
1318 for (k = 0; k < config->rx_ring_num; k++) {
1319 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1320 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1321 writeq(val64, &bar0->tti_command_mem);
1322
1323 /*
1324 * Once the operation completes, the Strobe bit of the command
1325 * register will be reset. We poll for this particular condition
1326 * We wait for a maximum of 500ms for the operation to complete,
1327 * if it's not complete by then we return error.
1328 */
1329 time = 0;
1330 while (TRUE) {
1331 val64 = readq(&bar0->tti_command_mem);
1332 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1333 break;
1334 }
1335 if (time > 10) {
1336 DBG_PRINT(ERR_DBG,
1337 "%s: TTI init Failed\n",
1338 dev->name);
1339 return -1;
1340 }
1341 time++;
1342 msleep(50);
1343 }
1344 }
1345 } else {
1346
1347 /* RTI Initialization */
1348 if (nic->device_type == XFRAME_II_DEVICE) {
1349 /*
1350 * Programmed to generate approx. 500 Intrs per
1351 * second
1352 */
1353 int count = (nic->config.bus_speed * 125)/4;
1354 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1355 } else {
1356 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1357 }
1358 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1359 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1360 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1361
1362 writeq(val64, &bar0->rti_data1_mem);
1363
1364 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1365 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1366 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1367 writeq(val64, &bar0->rti_data2_mem);
1368
1369 for (i = 0; i < config->rx_ring_num; i++) {
1370 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1371 | RTI_CMD_MEM_OFFSET(i);
1372 writeq(val64, &bar0->rti_command_mem);
1373
1374 /*
1375 * Once the operation completes, the Strobe bit of the
1376 * command register will be reset. We poll for this
1377 * particular condition. We wait for a maximum of 500ms
1378 * for the operation to complete, if it's not complete
1379 * by then we return error.
1380 */
1381 time = 0;
1382 while (TRUE) {
1383 val64 = readq(&bar0->rti_command_mem);
1384 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1385 break;
1386 }
1387 if (time > 10) {
1388 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1389 dev->name);
1390 return -1;
1391 }
1392 time++;
1393 msleep(50);
1394 }
1395 }
1396 }
1397
1398 /*
1399 * Initializing proper values as Pause threshold into all
1400 * the 8 Queues on Rx side.
1401 */
1402 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1403 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1404
1405 /* Disable RMAC PAD STRIPPING */
1406 add = (void *) &bar0->mac_cfg;
1407 val64 = readq(&bar0->mac_cfg);
1408 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1409 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1410 writel((u32) (val64), add);
1411 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1412 writel((u32) (val64 >> 32), (add + 4));
1413 val64 = readq(&bar0->mac_cfg);
1414
1415 /*
1416 * Set the time value to be inserted in the pause frame
1417 * generated by xena.
1418 */
1419 val64 = readq(&bar0->rmac_pause_cfg);
1420 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1421 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1422 writeq(val64, &bar0->rmac_pause_cfg);
1423
1424 /*
1425 * Set the Threshold Limit for Generating the pause frame
1426 * If the amount of data in any Queue exceeds the ratio of
1427 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1428 * a pause frame is generated.
1429 */
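/*
 * Worked example (illustrative): with the default
 * mc_pause_threshold_q0q3 of 187, a queue triggers pause frame
 * generation once it is roughly 187/256 (about 73%) full; the loop
 * below packs that per-queue threshold into one 64-bit register for
 * queues 0-3, and the following loop does the same for queues 4-7.
 */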
1430 val64 = 0;
1431 for (i = 0; i < 4; i++) {
1432 val64 |=
1433 (((u64) 0xFF00 | nic->mac_control.
1434 mc_pause_threshold_q0q3)
1435 << (i * 2 * 8));
1436 }
1437 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1438
1439 val64 = 0;
1440 for (i = 0; i < 4; i++) {
1441 val64 |=
1442 (((u64) 0xFF00 | nic->mac_control.
1443 mc_pause_threshold_q4q7)
1444 << (i * 2 * 8));
1445 }
1446 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1447
1448 /*
1449 * TxDMA will stop Read request if the number of read split has
1450 * exceeded the limit pointed by shared_splits
1451 */
1452 val64 = readq(&bar0->pic_control);
1453 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1454 writeq(val64, &bar0->pic_control);
1455
1456 /*
1457 * Programming the Herc to split every write transaction
1458 * that does not start on an ADB to reduce disconnects.
1459 */
1460 if (nic->device_type == XFRAME_II_DEVICE) {
1461 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1462 writeq(val64, &bar0->wreq_split_mask);
1463 }
1464
1465 /* Setting Link stability period to 64 ms */
1466 if (nic->device_type == XFRAME_II_DEVICE) {
1467 val64 = MISC_LINK_STABILITY_PRD(3);
1468 writeq(val64, &bar0->misc_control);
1469 }
1470
1471 return SUCCESS;
1472}
1473#define LINK_UP_DOWN_INTERRUPT 1
1474#define MAC_RMAC_ERR_TIMER 2
1475
1476#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1477#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1478#else
1479int s2io_link_fault_indication(nic_t *nic)
1480{
1481 if (nic->device_type == XFRAME_II_DEVICE)
1482 return LINK_UP_DOWN_INTERRUPT;
1483 else
1484 return MAC_RMAC_ERR_TIMER;
1485}
1486#endif
1487
1488/**
1489 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1490 * @nic: device private variable,
1491 * @mask: A mask indicating which Intr block must be modified and,
1492 * @flag: A flag indicating whether to enable or disable the Intrs.
1493 * Description: This function will either disable or enable the interrupts
1494 * depending on the flag argument. The mask argument can be used to
1495 * enable/disable any Intr block.
1496 * Return Value: NONE.
1497 */
1498
1499static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1500{
1501 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1502 register u64 val64 = 0, temp64 = 0;
1503
1504 /* Top level interrupt classification */
1505 /* PIC Interrupts */
1506 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1507 /* Enable PIC Intrs in the general intr mask register */
1508 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1509 if (flag == ENABLE_INTRS) {
1510 temp64 = readq(&bar0->general_int_mask);
1511 temp64 &= ~((u64) val64);
1512 writeq(temp64, &bar0->general_int_mask);
1513 /*
1514 * If Hercules adapter enable GPIO otherwise
1515 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1516 * interrupts for now.
1517 * TODO
1518 */
1519 if (s2io_link_fault_indication(nic) ==
1520 LINK_UP_DOWN_INTERRUPT ) {
1521 temp64 = readq(&bar0->pic_int_mask);
1522 temp64 &= ~((u64) PIC_INT_GPIO);
1523 writeq(temp64, &bar0->pic_int_mask);
1524 temp64 = readq(&bar0->gpio_int_mask);
1525 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1526 writeq(temp64, &bar0->gpio_int_mask);
1527 } else {
1528 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1529 }
1530 /*
1531 * No MSI Support is available presently, so TTI and
1532 * RTI interrupts are also disabled.
1533 */
1534 } else if (flag == DISABLE_INTRS) {
1535 /*
1536 * Disable PIC Intrs in the general
1537 * intr mask register
1538 */
1539 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1540 temp64 = readq(&bar0->general_int_mask);
1541 val64 |= temp64;
1542 writeq(val64, &bar0->general_int_mask);
1543 }
1544 }
1545
1546 /* DMA Interrupts */
1547 /* Enabling/Disabling Tx DMA interrupts */
1548 if (mask & TX_DMA_INTR) {
1549 /* Enable TxDMA Intrs in the general intr mask register */
1550 val64 = TXDMA_INT_M;
1551 if (flag == ENABLE_INTRS) {
1552 temp64 = readq(&bar0->general_int_mask);
1553 temp64 &= ~((u64) val64);
1554 writeq(temp64, &bar0->general_int_mask);
1555 /*
1556 * Keep all interrupts other than PFC interrupt
1557 * and PCC interrupt disabled in DMA level.
1558 */
1559 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1560 TXDMA_PCC_INT_M);
1561 writeq(val64, &bar0->txdma_int_mask);
1562 /*
1563 * Enable only the MISC error 1 interrupt in PFC block
1564 */
1565 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1566 writeq(val64, &bar0->pfc_err_mask);
1567 /*
1568 * Enable only the FB_ECC error interrupt in PCC block
1569 */
1570 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1571 writeq(val64, &bar0->pcc_err_mask);
1572 } else if (flag == DISABLE_INTRS) {
1573 /*
1574 * Disable TxDMA Intrs in the general intr mask
1575 * register
1576 */
1577 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1578 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1579 temp64 = readq(&bar0->general_int_mask);
1580 val64 |= temp64;
1581 writeq(val64, &bar0->general_int_mask);
1582 }
1583 }
1584
1585 /* Enabling/Disabling Rx DMA interrupts */
1586 if (mask & RX_DMA_INTR) {
1587 /* Enable RxDMA Intrs in the general intr mask register */
1588 val64 = RXDMA_INT_M;
1589 if (flag == ENABLE_INTRS) {
1590 temp64 = readq(&bar0->general_int_mask);
1591 temp64 &= ~((u64) val64);
1592 writeq(temp64, &bar0->general_int_mask);
1593 /*
1594 * All RxDMA block interrupts are disabled for now
1595 * TODO
1596 */
1597 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1598 } else if (flag == DISABLE_INTRS) {
1599 /*
1600 * Disable RxDMA Intrs in the general intr mask
1601 * register
1602 */
1603 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1604 temp64 = readq(&bar0->general_int_mask);
1605 val64 |= temp64;
1606 writeq(val64, &bar0->general_int_mask);
1607 }
1608 }
1609
1610 /* MAC Interrupts */
1611 /* Enabling/Disabling MAC interrupts */
1612 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1613 val64 = TXMAC_INT_M | RXMAC_INT_M;
1614 if (flag == ENABLE_INTRS) {
1615 temp64 = readq(&bar0->general_int_mask);
1616 temp64 &= ~((u64) val64);
1617 writeq(temp64, &bar0->general_int_mask);
20346722
K
1618 /*
1619 * All MAC block error interrupts are disabled for now
1da177e4
LT
1620 * TODO
1621 */
1622 } else if (flag == DISABLE_INTRS) {
1623 /*
1624 * Disable MAC Intrs in the general intr mask register
1625 */
1626 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1627 writeq(DISABLE_ALL_INTRS,
1628 &bar0->mac_rmac_err_mask);
1629
1630 temp64 = readq(&bar0->general_int_mask);
1631 val64 |= temp64;
1632 writeq(val64, &bar0->general_int_mask);
1633 }
1634 }
1635
1636 /* XGXS Interrupts */
1637 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1638 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1639 if (flag == ENABLE_INTRS) {
1640 temp64 = readq(&bar0->general_int_mask);
1641 temp64 &= ~((u64) val64);
1642 writeq(temp64, &bar0->general_int_mask);
1643 /*
1644 * All XGXS block error interrupts are disabled for now
1645 * TODO
1646 */
1647 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1648 } else if (flag == DISABLE_INTRS) {
1649 /*
1650 * Disable MC Intrs in the general intr mask register
1651 */
1652 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1653 temp64 = readq(&bar0->general_int_mask);
1654 val64 |= temp64;
1655 writeq(val64, &bar0->general_int_mask);
1656 }
1657 }
1658
1659 /* Memory Controller(MC) interrupts */
1660 if (mask & MC_INTR) {
1661 val64 = MC_INT_M;
1662 if (flag == ENABLE_INTRS) {
1663 temp64 = readq(&bar0->general_int_mask);
1664 temp64 &= ~((u64) val64);
1665 writeq(temp64, &bar0->general_int_mask);
1666 /*
1667 * Enable all MC Intrs.
1668 */
1669 writeq(0x0, &bar0->mc_int_mask);
1670 writeq(0x0, &bar0->mc_err_mask);
1671 } else if (flag == DISABLE_INTRS) {
1672 /*
1673 * Disable MC Intrs in the general intr mask register
1674 */
1675 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1676 temp64 = readq(&bar0->general_int_mask);
1677 val64 |= temp64;
1678 writeq(val64, &bar0->general_int_mask);
1679 }
1680 }
1681
1682
1683 /* Tx traffic interrupts */
1684 if (mask & TX_TRAFFIC_INTR) {
1685 val64 = TXTRAFFIC_INT_M;
1686 if (flag == ENABLE_INTRS) {
1687 temp64 = readq(&bar0->general_int_mask);
1688 temp64 &= ~((u64) val64);
1689 writeq(temp64, &bar0->general_int_mask);
1690 /*
1691 * Enable all the Tx side interrupts
1692 * writing 0 Enables all 64 TX interrupt levels
1693 */
1694 writeq(0x0, &bar0->tx_traffic_mask);
1695 } else if (flag == DISABLE_INTRS) {
1696 /*
1697 * Disable Tx Traffic Intrs in the general intr mask
1698 * register.
1699 */
1700 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1701 temp64 = readq(&bar0->general_int_mask);
1702 val64 |= temp64;
1703 writeq(val64, &bar0->general_int_mask);
1704 }
1705 }
1706
1707 /* Rx traffic interrupts */
1708 if (mask & RX_TRAFFIC_INTR) {
1709 val64 = RXTRAFFIC_INT_M;
1710 if (flag == ENABLE_INTRS) {
1711 temp64 = readq(&bar0->general_int_mask);
1712 temp64 &= ~((u64) val64);
1713 writeq(temp64, &bar0->general_int_mask);
1714 /* writing 0 Enables all 8 RX interrupt levels */
1715 writeq(0x0, &bar0->rx_traffic_mask);
1716 } else if (flag == DISABLE_INTRS) {
20346722
K
1717 /*
1718 * Disable Rx Traffic Intrs in the general intr mask
1da177e4
LT
1719 * register.
1720 */
1721 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1722 temp64 = readq(&bar0->general_int_mask);
1723 val64 |= temp64;
1724 writeq(val64, &bar0->general_int_mask);
1725 }
1726 }
1727}
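/*
 * A minimal usage sketch (mirroring how start_nic()/stop_nic() below use
 * this helper): callers OR together the interrupt groups of interest and
 * pass either ENABLE_INTRS or DISABLE_INTRS, e.g.
 *
 *	en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *			      ENABLE_INTRS);
 */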
1728
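/*
 * check_prc_pcc_state - helper for verify_xena_quiescence() below.
 * Examines the RMAC PCC idle and RC PRC quiescent bits of the adapter
 * status value. Xframe-II (Herc) and Xframe-I revisions >= 4 use the
 * RMAC_PCC_IDLE bit, older Xframe-I revisions use RMAC_PCC_FOUR_IDLE.
 * The flag argument selects the comparison used before/after the adapter
 * enable bit has been written. Returns 1 if the state is acceptable,
 * 0 otherwise.
 */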
541ae68f 1729static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
20346722
K
1730{
1731 int ret = 0;
1732
1733 if (flag == FALSE) {
541ae68f 1734 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd
K
1735 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1736 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1737 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1738 ret = 1;
1739 }
541ae68f 1740 } else {
5e25b9dd
K
1741 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1742 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1743 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1744 ret = 1;
1745 }
20346722
K
1746 }
1747 } else {
541ae68f 1748 if ((!herc && (rev_id >= 4)) || herc) {
5e25b9dd
K
1749 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1750 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1751 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1752 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1753 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1754 ret = 1;
1755 }
1756 } else {
1757 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1758 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1759 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1760 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1761 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1762 ret = 1;
1763 }
20346722
K
1764 }
1765 }
1766
1767 return ret;
1768}
1769/**
1770 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4
LT
1771 * @val64 : Value read from adapter status register.
1772 * @flag : indicates if the adapter enable bit was ever written once
1773 * before.
1774 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 1775 * on whether the adapter enable bit was written or not, the comparison
1da177e4
LT
1776 * differs and the calling function passes the input argument flag to
1777 * indicate this.
20346722 1778 * Return: 1 if Xena is quiescent
1da177e4
LT
1779 * 0 if Xena is not quiescent
1780 */
1781
20346722 1782static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1da177e4 1783{
541ae68f 1784 int ret = 0, herc;
1da177e4 1785 u64 tmp64 = ~((u64) val64);
5e25b9dd 1786 int rev_id = get_xena_rev_id(sp->pdev);
1da177e4 1787
541ae68f 1788 herc = (sp->device_type == XFRAME_II_DEVICE);
1da177e4
LT
1789 if (!
1790 (tmp64 &
1791 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1792 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1793 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1794 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1795 ADAPTER_STATUS_P_PLL_LOCK))) {
541ae68f 1796 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1da177e4
LT
1797 }
1798
1799 return ret;
1800}
1801
1802/**
1803 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1804 * @sp: Pointer to device specific structure
20346722 1805 * Description :
1da177e4
LT
1806 * New procedure to clear mac address reading problems on Alpha platforms
1807 *
1808 */
1809
20346722 1810void fix_mac_address(nic_t * sp)
1da177e4
LT
1811{
1812 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1813 u64 val64;
1814 int i = 0;
1815
1816 while (fix_mac[i] != END_SIGN) {
1817 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1818 udelay(10);
1da177e4
LT
1819 val64 = readq(&bar0->gpio_control);
1820 }
1821}
1822
1823/**
20346722 1824 * start_nic - Turns the device on
1da177e4 1825 * @nic : device private variable.
20346722
K
1826 * Description:
1827 * This function actually turns the device on. Before this function is
1828 * called, all registers are configured from their reset states
1829 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
1830 * calling this function, the device interrupts are cleared and the NIC is
1831 * literally switched on by writing into the adapter control register.
20346722 1832 * Return Value:
1da177e4
LT
1833 * SUCCESS on success and -1 on failure.
1834 */
1835
1836static int start_nic(struct s2io_nic *nic)
1837{
1838 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1839 struct net_device *dev = nic->dev;
1840 register u64 val64 = 0;
20346722
K
1841 u16 interruptible;
1842 u16 subid, i;
1da177e4
LT
1843 mac_info_t *mac_control;
1844 struct config_param *config;
1845
1846 mac_control = &nic->mac_control;
1847 config = &nic->config;
1848
1849 /* PRC Initialization and configuration */
1850 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1851 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1da177e4
LT
1852 &bar0->prc_rxd0_n[i]);
1853
1854 val64 = readq(&bar0->prc_ctrl_n[i]);
b6e3f982
K
1855 if (nic->config.bimodal)
1856 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1da177e4
LT
1857#ifndef CONFIG_2BUFF_MODE
1858 val64 |= PRC_CTRL_RC_ENABLED;
1859#else
1860 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1861#endif
1862 writeq(val64, &bar0->prc_ctrl_n[i]);
1863 }
1864
1865#ifdef CONFIG_2BUFF_MODE
1866 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1867 val64 = readq(&bar0->rx_pa_cfg);
1868 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1869 writeq(val64, &bar0->rx_pa_cfg);
1870#endif
1871
20346722 1872 /*
1da177e4
LT
1873 * Enabling MC-RLDRAM. After enabling the device, we timeout
1874 * for around 100ms, which is approximately the time required
1875 * for the device to be ready for operation.
1876 */
1877 val64 = readq(&bar0->mc_rldram_mrs);
1878 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1879 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1880 val64 = readq(&bar0->mc_rldram_mrs);
1881
20346722 1882 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
1883
1884 /* Enabling ECC Protection. */
1885 val64 = readq(&bar0->adapter_control);
1886 val64 &= ~ADAPTER_ECC_EN;
1887 writeq(val64, &bar0->adapter_control);
1888
20346722
K
1889 /*
1890 * Clearing any possible Link state change interrupts that
1da177e4
LT
1891 * could have popped up just before Enabling the card.
1892 */
1893 val64 = readq(&bar0->mac_rmac_err_reg);
1894 if (val64)
1895 writeq(val64, &bar0->mac_rmac_err_reg);
1896
20346722
K
1897 /*
1898 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
1899 * it.
1900 */
1901 val64 = readq(&bar0->adapter_status);
20346722 1902 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
1903 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1904 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1905 (unsigned long long) val64);
1906 return FAILURE;
1907 }
1908
1909 /* Enable select interrupts */
e960fc5c 1910 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
a371a07d
K
1911 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1912 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1913
1da177e4
LT
1914 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1915
20346722 1916 /*
1da177e4 1917 * With some switches, link might be already up at this point.
20346722
K
1918 * Because of this weird behavior, when we enable laser,
1919 * we may not get link. We need to handle this. We cannot
1920 * figure out which switch is misbehaving. So we are forced to
1921 * make a global change.
1da177e4
LT
1922 */
1923
1924 /* Enabling Laser. */
1925 val64 = readq(&bar0->adapter_control);
1926 val64 |= ADAPTER_EOI_TX_ON;
1927 writeq(val64, &bar0->adapter_control);
1928
1929 /* SXE-002: Initialize link and activity LED */
1930 subid = nic->pdev->subsystem_device;
541ae68f
K
1931 if (((subid & 0xFF) >= 0x07) &&
1932 (nic->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
1933 val64 = readq(&bar0->gpio_control);
1934 val64 |= 0x0000800000000000ULL;
1935 writeq(val64, &bar0->gpio_control);
1936 val64 = 0x0411040400000000ULL;
20346722 1937 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1da177e4
LT
1938 }
1939
20346722
K
1940 /*
1941 * Don't see link state interrupts on certain switches, so
1da177e4
LT
1942 * directly scheduling a link state task from here.
1943 */
1944 schedule_work(&nic->set_link_task);
1945
1da177e4
LT
1946 return SUCCESS;
1947}
1948
20346722
K
1949/**
1950 * free_tx_buffers - Free all queued Tx buffers
1da177e4 1951 * @nic : device private variable.
20346722 1952 * Description:
1da177e4 1953 * Free all queued Tx buffers.
20346722 1954 * Return Value: void
1da177e4
LT
1955*/
1956
1957static void free_tx_buffers(struct s2io_nic *nic)
1958{
1959 struct net_device *dev = nic->dev;
1960 struct sk_buff *skb;
1961 TxD_t *txdp;
1962 int i, j;
1963 mac_info_t *mac_control;
1964 struct config_param *config;
1ddc50d4 1965 int cnt = 0, frg_cnt;
1da177e4
LT
1966
1967 mac_control = &nic->mac_control;
1968 config = &nic->config;
1969
1970 for (i = 0; i < config->tx_fifo_num; i++) {
1971 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
20346722 1972 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1da177e4
LT
1973 list_virt_addr;
1974 skb =
1975 (struct sk_buff *) ((unsigned long) txdp->
1976 Host_Control);
1977 if (skb == NULL) {
1ddc50d4
K
1978 memset(txdp, 0, sizeof(TxD_t) *
1979 config->max_txds);
1da177e4
LT
1980 continue;
1981 }
1ddc50d4
K
1982 frg_cnt = skb_shinfo(skb)->nr_frags;
1983 pci_unmap_single(nic->pdev, (dma_addr_t)
1984 txdp->Buffer_Pointer,
1985 skb->len - skb->data_len,
1986 PCI_DMA_TODEVICE);
1987 if (frg_cnt) {
1988 TxD_t *temp;
1989 temp = txdp;
1990 txdp++;
1991 for (j = 0; j < frg_cnt; j++, txdp++) {
1992 skb_frag_t *frag =
1993 &skb_shinfo(skb)->frags[j];
1994 pci_unmap_page(nic->pdev,
1995 (dma_addr_t)
1996 txdp->
1997 Buffer_Pointer,
1998 frag->size,
1999 PCI_DMA_TODEVICE);
2000 }
2001 txdp = temp;
2002 }
1da177e4 2003 dev_kfree_skb(skb);
1ddc50d4 2004 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1da177e4
LT
2005 cnt++;
2006 }
2007 DBG_PRINT(INTR_DBG,
2008 "%s:forcibly freeing %d skbs on FIFO%d\n",
2009 dev->name, cnt, i);
20346722
K
2010 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2011 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1da177e4
LT
2012 }
2013}
2014
20346722
K
2015/**
2016 * stop_nic - To stop the nic
1da177e4 2017 * @nic ; device private variable.
20346722
K
2018 * Description:
2019 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2020 * function does. This function is called to stop the device.
2021 * Return Value:
2022 * void.
2023 */
2024
2025static void stop_nic(struct s2io_nic *nic)
2026{
2027 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2028 register u64 val64 = 0;
2029 u16 interruptible, i;
2030 mac_info_t *mac_control;
2031 struct config_param *config;
2032
2033 mac_control = &nic->mac_control;
2034 config = &nic->config;
2035
2036 /* Disable all interrupts */
e960fc5c 2037 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
a371a07d
K
2038 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2039 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1da177e4
LT
2040 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2041
2042 /* Disable PRCs */
2043 for (i = 0; i < config->rx_ring_num; i++) {
2044 val64 = readq(&bar0->prc_ctrl_n[i]);
2045 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2046 writeq(val64, &bar0->prc_ctrl_n[i]);
2047 }
2048}
2049
20346722
K
2050/**
2051 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2052 * @nic: device private variable
20346722
K
2053 * @ring_no: ring number
2054 * Description:
1da177e4
LT
2055 * The function allocates Rx side skbs and puts the physical
2056 * address of these buffers into the RxD buffer pointers, so that the NIC
2057 * can DMA the received frame into these locations.
2058 * The NIC supports 3 receive modes, viz
2059 * 1. single buffer,
2060 * 2. three buffer and
2061 * 3. Five buffer modes.
20346722
K
2062 * Each mode defines how many fragments the received frame will be split
2063 * up into by the NIC. In three buffer mode the frame is split into L3
1da177e4
LT
2064 * header, L4 header and L4 payload; in five buffer mode the L4 payload
2065 * itself is split into 3 fragments. As of now only single buffer mode
2066 * is supported.
2067 * Return Value:
2068 * SUCCESS on success or an appropriate -ve value on failure.
2069 */
2070
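/*
 * Note: the receive buffer mode is chosen at build time. Without
 * CONFIG_2BUFF_MODE the single buffer layout below is used; with
 * CONFIG_2BUFF_MODE the PRCs are programmed for ring mode 3 (see
 * start_nic() above) and each RxD carries three buffer pointers.
 */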
20346722 2071int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1da177e4
LT
2072{
2073 struct net_device *dev = nic->dev;
2074 struct sk_buff *skb;
2075 RxD_t *rxdp;
2076 int off, off1, size, block_no, block_no1;
2077 int offset, offset1;
2078 u32 alloc_tab = 0;
20346722 2079 u32 alloc_cnt;
1da177e4
LT
2080 mac_info_t *mac_control;
2081 struct config_param *config;
2082#ifdef CONFIG_2BUFF_MODE
2083 RxD_t *rxdpnext;
2084 int nextblk;
20346722 2085 u64 tmp;
1da177e4
LT
2086 buffAdd_t *ba;
2087 dma_addr_t rxdpphys;
2088#endif
2089#ifndef CONFIG_S2IO_NAPI
2090 unsigned long flags;
2091#endif
303bcb4b 2092 RxD_t *first_rxdp = NULL;
1da177e4
LT
2093
2094 mac_control = &nic->mac_control;
2095 config = &nic->config;
20346722
K
2096 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2097 atomic_read(&nic->rx_bufs_left[ring_no]);
1da177e4
LT
2098 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2099 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2100
2101 while (alloc_tab < alloc_cnt) {
20346722 2102 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2103 block_index;
20346722 2104 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1da177e4 2105 block_index;
20346722
K
2106 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2107 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1da177e4
LT
2108#ifndef CONFIG_2BUFF_MODE
2109 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2110 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2111#else
2112 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2113 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2114#endif
2115
20346722 2116 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2117 block_virt_addr + off;
2118 if ((offset == offset1) && (rxdp->Host_Control)) {
2119 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2120 DBG_PRINT(INTR_DBG, " info equated\n");
2121 goto end;
2122 }
2123#ifndef CONFIG_2BUFF_MODE
2124 if (rxdp->Control_1 == END_OF_BLOCK) {
20346722 2125 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2126 block_index++;
20346722
K
2127 mac_control->rings[ring_no].rx_curr_put_info.
2128 block_index %= mac_control->rings[ring_no].block_count;
2129 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2130 block_index;
1da177e4
LT
2131 off++;
2132 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2133 mac_control->rings[ring_no].rx_curr_put_info.offset =
1da177e4
LT
2134 off;
2135 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2136 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2137 dev->name, rxdp);
2138 }
2139#ifndef CONFIG_S2IO_NAPI
2140 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2141 mac_control->rings[ring_no].put_pos =
1da177e4
LT
2142 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2143 spin_unlock_irqrestore(&nic->put_lock, flags);
2144#endif
2145#else
2146 if (rxdp->Host_Control == END_OF_BLOCK) {
20346722 2147 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2148 block_index++;
20346722
K
2149 mac_control->rings[ring_no].rx_curr_put_info.block_index
2150 %= mac_control->rings[ring_no].block_count;
2151 block_no = mac_control->rings[ring_no].rx_curr_put_info
2152 .block_index;
1da177e4
LT
2153 off = 0;
2154 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2155 dev->name, block_no,
2156 (unsigned long long) rxdp->Control_1);
20346722 2157 mac_control->rings[ring_no].rx_curr_put_info.offset =
1da177e4 2158 off;
20346722 2159 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2160 block_virt_addr;
2161 }
2162#ifndef CONFIG_S2IO_NAPI
2163 spin_lock_irqsave(&nic->put_lock, flags);
20346722 2164 mac_control->rings[ring_no].put_pos = (block_no *
1da177e4
LT
2165 (MAX_RXDS_PER_BLOCK + 1)) + off;
2166 spin_unlock_irqrestore(&nic->put_lock, flags);
2167#endif
2168#endif
2169
2170#ifndef CONFIG_2BUFF_MODE
2171 if (rxdp->Control_1 & RXD_OWN_XENA)
2172#else
2173 if (rxdp->Control_2 & BIT(0))
2174#endif
2175 {
20346722 2176 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4
LT
2177 offset = off;
2178 goto end;
2179 }
2180#ifdef CONFIG_2BUFF_MODE
20346722
K
2181 /*
2182 * RxDs Spanning cache lines will be replenished only
2183 * if the succeeding RxD is also owned by Host. It
2184 * will always be the ((8*i)+3) and ((8*i)+6)
2185 * descriptors for the 48 byte descriptor. The offending
1da177e4
LT
2186 * descriptor is of course the 3rd descriptor.
2187 */
20346722 2188 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2189 block_dma_addr + (off * sizeof(RxD_t));
2190 if (((u64) (rxdpphys)) % 128 > 80) {
20346722 2191 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1da177e4
LT
2192 block_virt_addr + (off + 1);
2193 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2194 nextblk = (block_no + 1) %
20346722
K
2195 (mac_control->rings[ring_no].block_count);
2196 rxdpnext = mac_control->rings[ring_no].rx_blocks
1da177e4
LT
2197 [nextblk].block_virt_addr;
2198 }
2199 if (rxdpnext->Control_2 & BIT(0))
2200 goto end;
2201 }
2202#endif
2203
2204#ifndef CONFIG_2BUFF_MODE
2205 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2206#else
2207 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2208#endif
2209 if (!skb) {
2210 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2211 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
303bcb4b
K
2212 if (first_rxdp) {
2213 wmb();
2214 first_rxdp->Control_1 |= RXD_OWN_XENA;
2215 }
1da177e4
LT
2216 return -ENOMEM;
2217 }
2218#ifndef CONFIG_2BUFF_MODE
2219 skb_reserve(skb, NET_IP_ALIGN);
2220 memset(rxdp, 0, sizeof(RxD_t));
2221 rxdp->Buffer0_ptr = pci_map_single
2222 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2223 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2224 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2225 rxdp->Host_Control = (unsigned long) (skb);
303bcb4b
K
2226 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2227 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4
LT
2228 off++;
2229 off %= (MAX_RXDS_PER_BLOCK + 1);
20346722 2230 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2231#else
20346722 2232 ba = &mac_control->rings[ring_no].ba[block_no][off];
1da177e4 2233 skb_reserve(skb, BUF0_LEN);
689be439
DM
2234 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2235 if (tmp)
2236 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1da177e4
LT
2237
2238 memset(rxdp, 0, sizeof(RxD_t));
2239 rxdp->Buffer2_ptr = pci_map_single
2240 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2241 PCI_DMA_FROMDEVICE);
2242 rxdp->Buffer0_ptr =
2243 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2244 PCI_DMA_FROMDEVICE);
2245 rxdp->Buffer1_ptr =
2246 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2247 PCI_DMA_FROMDEVICE);
2248
2249 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2250 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2251 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2252 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2253 rxdp->Host_Control = (u64) ((unsigned long) (skb));
303bcb4b
K
2254 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2255 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2256 off++;
20346722 2257 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1da177e4 2258#endif
5e25b9dd 2259 rxdp->Control_2 |= SET_RXD_MARKER;
20346722 2260
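/*
 * Descriptors are handed over to the NIC in batches of
 * (1 << rxsync_frequency). The first RxD of each batch is remembered
 * and its ownership bit is flipped last, after a wmb(), so the adapter
 * never sees a partially written batch.
 */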
303bcb4b
K
2261 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2262 if (first_rxdp) {
2263 wmb();
2264 first_rxdp->Control_1 |= RXD_OWN_XENA;
2265 }
2266 first_rxdp = rxdp;
2267 }
1da177e4
LT
2268 atomic_inc(&nic->rx_bufs_left[ring_no]);
2269 alloc_tab++;
2270 }
2271
2272 end:
303bcb4b
K
2273 /* Transfer ownership of first descriptor to adapter just before
2274 * exiting. Before that, use a memory barrier so that ownership
2275 * and other fields are seen by the adapter correctly.
2276 */
2277 if (first_rxdp) {
2278 wmb();
2279 first_rxdp->Control_1 |= RXD_OWN_XENA;
2280 }
2281
1da177e4
LT
2282 return SUCCESS;
2283}
2284
2285/**
20346722 2286 * free_rx_buffers - Frees all Rx buffers
1da177e4 2287 * @sp: device private variable.
20346722 2288 * Description:
1da177e4
LT
2289 * This function will free all Rx buffers allocated by host.
2290 * Return Value:
2291 * NONE.
2292 */
2293
2294static void free_rx_buffers(struct s2io_nic *sp)
2295{
2296 struct net_device *dev = sp->dev;
2297 int i, j, blk = 0, off, buf_cnt = 0;
2298 RxD_t *rxdp;
2299 struct sk_buff *skb;
2300 mac_info_t *mac_control;
2301 struct config_param *config;
2302#ifdef CONFIG_2BUFF_MODE
2303 buffAdd_t *ba;
2304#endif
2305
2306 mac_control = &sp->mac_control;
2307 config = &sp->config;
2308
2309 for (i = 0; i < config->rx_ring_num; i++) {
2310 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2311 off = j % (MAX_RXDS_PER_BLOCK + 1);
20346722
K
2312 rxdp = mac_control->rings[i].rx_blocks[blk].
2313 block_virt_addr + off;
1da177e4
LT
2314
2315#ifndef CONFIG_2BUFF_MODE
2316 if (rxdp->Control_1 == END_OF_BLOCK) {
2317 rxdp =
2318 (RxD_t *) ((unsigned long) rxdp->
2319 Control_2);
2320 j++;
2321 blk++;
2322 }
2323#else
2324 if (rxdp->Host_Control == END_OF_BLOCK) {
2325 blk++;
2326 continue;
2327 }
2328#endif
2329
2330 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2331 memset(rxdp, 0, sizeof(RxD_t));
2332 continue;
2333 }
2334
2335 skb =
2336 (struct sk_buff *) ((unsigned long) rxdp->
2337 Host_Control);
2338 if (skb) {
2339#ifndef CONFIG_2BUFF_MODE
2340 pci_unmap_single(sp->pdev, (dma_addr_t)
2341 rxdp->Buffer0_ptr,
2342 dev->mtu +
2343 HEADER_ETHERNET_II_802_3_SIZE
2344 + HEADER_802_2_SIZE +
2345 HEADER_SNAP_SIZE,
2346 PCI_DMA_FROMDEVICE);
2347#else
20346722 2348 ba = &mac_control->rings[i].ba[blk][off];
1da177e4
LT
2349 pci_unmap_single(sp->pdev, (dma_addr_t)
2350 rxdp->Buffer0_ptr,
2351 BUF0_LEN,
2352 PCI_DMA_FROMDEVICE);
2353 pci_unmap_single(sp->pdev, (dma_addr_t)
2354 rxdp->Buffer1_ptr,
2355 BUF1_LEN,
2356 PCI_DMA_FROMDEVICE);
2357 pci_unmap_single(sp->pdev, (dma_addr_t)
2358 rxdp->Buffer2_ptr,
2359 dev->mtu + BUF0_LEN + 4,
2360 PCI_DMA_FROMDEVICE);
2361#endif
2362 dev_kfree_skb(skb);
2363 atomic_dec(&sp->rx_bufs_left[i]);
2364 buf_cnt++;
2365 }
2366 memset(rxdp, 0, sizeof(RxD_t));
2367 }
20346722
K
2368 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2369 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2370 mac_control->rings[i].rx_curr_put_info.offset = 0;
2371 mac_control->rings[i].rx_curr_get_info.offset = 0;
1da177e4
LT
2372 atomic_set(&sp->rx_bufs_left[i], 0);
2373 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2374 dev->name, buf_cnt, i);
2375 }
2376}
2377
2378/**
2379 * s2io_poll - Rx interrupt handler for NAPI support
2380 * @dev : pointer to the device structure.
20346722 2381 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
2382 * during one pass through the "Poll" function.
2383 * Description:
2384 * Comes into picture only if NAPI support has been incorporated. It does
2385 * the same thing that rx_intr_handler does, but not in an interrupt
2386 * context. Also, it will process only a given number of packets.
2387 * Return value:
2388 * 0 on success and 1 if there are No Rx packets to be processed.
2389 */
2390
20346722 2391#if defined(CONFIG_S2IO_NAPI)
1da177e4
LT
2392static int s2io_poll(struct net_device *dev, int *budget)
2393{
2394 nic_t *nic = dev->priv;
20346722 2395 int pkt_cnt = 0, org_pkts_to_process;
1da177e4
LT
2396 mac_info_t *mac_control;
2397 struct config_param *config;
20346722
K
2398 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2399 u64 val64;
2400 int i;
1da177e4 2401
7ba013ac 2402 atomic_inc(&nic->isr_cnt);
1da177e4
LT
2403 mac_control = &nic->mac_control;
2404 config = &nic->config;
2405
20346722
K
2406 nic->pkts_to_process = *budget;
2407 if (nic->pkts_to_process > dev->quota)
2408 nic->pkts_to_process = dev->quota;
2409 org_pkts_to_process = nic->pkts_to_process;
1da177e4
LT
2410
2411 val64 = readq(&bar0->rx_traffic_int);
2412 writeq(val64, &bar0->rx_traffic_int);
2413
2414 for (i = 0; i < config->rx_ring_num; i++) {
20346722
K
2415 rx_intr_handler(&mac_control->rings[i]);
2416 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2417 if (!nic->pkts_to_process) {
2418 /* Quota for the current iteration has been met */
2419 goto no_rx;
1da177e4 2420 }
1da177e4
LT
2421 }
2422 if (!pkt_cnt)
2423 pkt_cnt = 1;
2424
2425 dev->quota -= pkt_cnt;
2426 *budget -= pkt_cnt;
2427 netif_rx_complete(dev);
2428
2429 for (i = 0; i < config->rx_ring_num; i++) {
2430 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2431 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2432 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2433 break;
2434 }
2435 }
2436 /* Re enable the Rx interrupts. */
2437 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
7ba013ac 2438 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2439 return 0;
2440
20346722 2441no_rx:
1da177e4
LT
2442 dev->quota -= pkt_cnt;
2443 *budget -= pkt_cnt;
2444
2445 for (i = 0; i < config->rx_ring_num; i++) {
2446 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2447 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2448 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2449 break;
2450 }
2451 }
7ba013ac 2452 atomic_dec(&nic->isr_cnt);
1da177e4
LT
2453 return 1;
2454}
20346722
K
2455#endif
2456
2457/**
1da177e4
LT
2458 * rx_intr_handler - Rx interrupt handler
2459 * @nic: device private variable.
20346722
K
2460 * Description:
2461 * If the interrupt is because of a received frame or if the
1da177e4 2462 * receive ring contains fresh as yet un-processed frames,this function is
20346722
K
2463 * called. It picks out the RxD at which place the last Rx processing had
2464 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2465 * the offset.
2466 * Return Value:
2467 * NONE.
2468 */
20346722 2469static void rx_intr_handler(ring_info_t *ring_data)
1da177e4 2470{
20346722 2471 nic_t *nic = ring_data->nic;
1da177e4 2472 struct net_device *dev = (struct net_device *) nic->dev;
20346722 2473 int get_block, get_offset, put_block, put_offset, ring_bufs;
1da177e4
LT
2474 rx_curr_get_info_t get_info, put_info;
2475 RxD_t *rxdp;
2476 struct sk_buff *skb;
20346722
K
2477#ifndef CONFIG_S2IO_NAPI
2478 int pkt_cnt = 0;
1da177e4 2479#endif
7ba013ac
K
2480 spin_lock(&nic->rx_lock);
2481 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2482 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2483 __FUNCTION__, dev->name);
2484 spin_unlock(&nic->rx_lock);
     return;
2485 }
2486
20346722
K
2487 get_info = ring_data->rx_curr_get_info;
2488 get_block = get_info.block_index;
2489 put_info = ring_data->rx_curr_put_info;
2490 put_block = put_info.block_index;
2491 ring_bufs = get_info.ring_len+1;
2492 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
1da177e4 2493 get_info.offset;
20346722
K
2494 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2495 get_info.offset;
2496#ifndef CONFIG_S2IO_NAPI
2497 spin_lock(&nic->put_lock);
2498 put_offset = ring_data->put_pos;
2499 spin_unlock(&nic->put_lock);
2500#else
2501 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2502 put_info.offset;
2503#endif
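/*
 * Walk the ring from the last processed position, handing every
 * descriptor already owned by the host (see RXD_IS_UP2DT()) to
 * rx_osm_handler(), until we catch up with the "put" position or
 * exhaust the NAPI quota / indicate_max_pkts budget.
 */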
5e25b9dd
K
2504 while (RXD_IS_UP2DT(rxdp) &&
2505 (((get_offset + 1) % ring_bufs) != put_offset)) {
20346722
K
2506 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2507 if (skb == NULL) {
2508 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2509 dev->name);
2510 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
7ba013ac 2511 spin_unlock(&nic->rx_lock);
20346722 2512 return;
1da177e4 2513 }
20346722
K
2514#ifndef CONFIG_2BUFF_MODE
2515 pci_unmap_single(nic->pdev, (dma_addr_t)
2516 rxdp->Buffer0_ptr,
2517 dev->mtu +
2518 HEADER_ETHERNET_II_802_3_SIZE +
2519 HEADER_802_2_SIZE +
2520 HEADER_SNAP_SIZE,
2521 PCI_DMA_FROMDEVICE);
1da177e4 2522#else
20346722
K
2523 pci_unmap_single(nic->pdev, (dma_addr_t)
2524 rxdp->Buffer0_ptr,
2525 BUF0_LEN, PCI_DMA_FROMDEVICE);
2526 pci_unmap_single(nic->pdev, (dma_addr_t)
2527 rxdp->Buffer1_ptr,
2528 BUF1_LEN, PCI_DMA_FROMDEVICE);
2529 pci_unmap_single(nic->pdev, (dma_addr_t)
2530 rxdp->Buffer2_ptr,
2531 dev->mtu + BUF0_LEN + 4,
2532 PCI_DMA_FROMDEVICE);
2533#endif
2534 rx_osm_handler(ring_data, rxdp);
2535 get_info.offset++;
2536 ring_data->rx_curr_get_info.offset =
1da177e4 2537 get_info.offset;
20346722
K
2538 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2539 get_info.offset;
2540 if (get_info.offset &&
2541 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2542 get_info.offset = 0;
2543 ring_data->rx_curr_get_info.offset
2544 = get_info.offset;
2545 get_block++;
2546 get_block %= ring_data->block_count;
2547 ring_data->rx_curr_get_info.block_index
2548 = get_block;
2549 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2550 }
1da177e4 2551
20346722 2552 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1da177e4 2553 get_info.offset;
20346722
K
2554#ifdef CONFIG_S2IO_NAPI
2555 nic->pkts_to_process -= 1;
2556 if (!nic->pkts_to_process)
2557 break;
2558#else
2559 pkt_cnt++;
1da177e4
LT
2560 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2561 break;
20346722 2562#endif
1da177e4 2563 }
7ba013ac 2564 spin_unlock(&nic->rx_lock);
1da177e4 2565}
20346722
K
2566
2567/**
1da177e4
LT
2568 * tx_intr_handler - Transmit interrupt handler
2569 * @nic : device private variable
20346722
K
2570 * Description:
2571 * If an interrupt was raised to indicate DMA complete of the
2572 * Tx packet, this function is called. It identifies the last TxD
2573 * whose buffer was freed and frees all skbs whose data have already been
1da177e4
LT
2574 * DMA'ed into the NIC's internal memory.
2575 * Return Value:
2576 * NONE
2577 */
2578
20346722 2579static void tx_intr_handler(fifo_info_t *fifo_data)
1da177e4 2580{
20346722 2581 nic_t *nic = fifo_data->nic;
1da177e4
LT
2582 struct net_device *dev = (struct net_device *) nic->dev;
2583 tx_curr_get_info_t get_info, put_info;
2584 struct sk_buff *skb;
2585 TxD_t *txdlp;
1da177e4 2586 u16 j, frg_cnt;
1da177e4 2587
20346722
K
2588 get_info = fifo_data->tx_curr_get_info;
2589 put_info = fifo_data->tx_curr_put_info;
2590 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2591 list_virt_addr;
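/*
 * Reclaim completed TxD lists: walk from the "get" offset towards the
 * "put" offset, freeing each skb once the NIC has cleared
 * TXD_LIST_OWN_XENA on the first descriptor of its list.
 */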
2592 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2593 (get_info.offset != put_info.offset) &&
2594 (txdlp->Host_Control)) {
2595 /* Check for TxD errors */
2596 if (txdlp->Control_1 & TXD_T_CODE) {
2597 unsigned long long err;
2598 err = txdlp->Control_1 & TXD_T_CODE;
2599 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2600 err);
2601 }
1da177e4 2602
20346722
K
2603 skb = (struct sk_buff *) ((unsigned long)
2604 txdlp->Host_Control);
2605 if (skb == NULL) {
2606 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2607 __FUNCTION__);
2608 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2609 return;
2610 }
2611
2612 frg_cnt = skb_shinfo(skb)->nr_frags;
2613 nic->tx_pkt_count++;
2614
2615 pci_unmap_single(nic->pdev, (dma_addr_t)
2616 txdlp->Buffer_Pointer,
2617 skb->len - skb->data_len,
2618 PCI_DMA_TODEVICE);
2619 if (frg_cnt) {
2620 TxD_t *temp;
2621 temp = txdlp;
2622 txdlp++;
2623 for (j = 0; j < frg_cnt; j++, txdlp++) {
2624 skb_frag_t *frag =
2625 &skb_shinfo(skb)->frags[j];
0b1f7ebe
K
2626 if (!txdlp->Buffer_Pointer)
2627 break;
20346722
K
2628 pci_unmap_page(nic->pdev,
2629 (dma_addr_t)
2630 txdlp->
2631 Buffer_Pointer,
2632 frag->size,
2633 PCI_DMA_TODEVICE);
1da177e4 2634 }
20346722 2635 txdlp = temp;
1da177e4 2636 }
20346722
K
2637 memset(txdlp, 0,
2638 (sizeof(TxD_t) * fifo_data->max_txds));
2639
2640 /* Updating the statistics block */
20346722
K
2641 nic->stats.tx_bytes += skb->len;
2642 dev_kfree_skb_irq(skb);
2643
2644 get_info.offset++;
2645 get_info.offset %= get_info.fifo_len + 1;
2646 txdlp = (TxD_t *) fifo_data->list_info
2647 [get_info.offset].list_virt_addr;
2648 fifo_data->tx_curr_get_info.offset =
2649 get_info.offset;
1da177e4
LT
2650 }
2651
2652 spin_lock(&nic->tx_lock);
2653 if (netif_queue_stopped(dev))
2654 netif_wake_queue(dev);
2655 spin_unlock(&nic->tx_lock);
2656}
2657
20346722 2658/**
1da177e4
LT
2659 * alarm_intr_handler - Alarm Interrupt handler
2660 * @nic: device private variable
20346722 2661 * Description: If the interrupt was neither because of an Rx packet nor Tx
1da177e4 2662 * complete, this function is called. If the interrupt was to indicate
20346722
K
2663 * a loss of link, the OSM link status handler is invoked. For any other
2664 * alarm interrupt, the block that raised the interrupt is displayed
1da177e4
LT
2665 * and a H/W reset is issued.
2666 * Return Value:
2667 * NONE
2668*/
2669
2670static void alarm_intr_handler(struct s2io_nic *nic)
2671{
2672 struct net_device *dev = (struct net_device *) nic->dev;
2673 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2674 register u64 val64 = 0, err_reg = 0;
2675
2676 /* Handling link status change error Intr */
a371a07d
K
2677 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2678 err_reg = readq(&bar0->mac_rmac_err_reg);
2679 writeq(err_reg, &bar0->mac_rmac_err_reg);
2680 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2681 schedule_work(&nic->set_link_task);
2682 }
1da177e4
LT
2683 }
2684
5e25b9dd
K
2685 /* Handling Ecc errors */
2686 val64 = readq(&bar0->mc_err_reg);
2687 writeq(val64, &bar0->mc_err_reg);
2688 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2689 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
7ba013ac
K
2690 nic->mac_control.stats_info->sw_stat.
2691 double_ecc_errs++;
5e25b9dd
K
2692 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2693 dev->name);
2694 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
e960fc5c 2695 if (nic->device_type != XFRAME_II_DEVICE) {
2696 netif_stop_queue(dev);
2697 schedule_work(&nic->rst_timer_task);
2698 }
5e25b9dd 2699 } else {
7ba013ac
K
2700 nic->mac_control.stats_info->sw_stat.
2701 single_ecc_errs++;
5e25b9dd
K
2702 }
2703 }
2704
1da177e4
LT
2705 /* In case of a serious error, the device will be Reset. */
2706 val64 = readq(&bar0->serr_source);
2707 if (val64 & SERR_SOURCE_ANY) {
2708 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2709 DBG_PRINT(ERR_DBG, "serious error!!\n");
2710 netif_stop_queue(dev);
2711 schedule_work(&nic->rst_timer_task);
2712 }
2713
2714 /*
2715 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2716 * Error occurs, the adapter will be recycled by disabling the
20346722 2717 * adapter enable bit and enabling it again after the device
1da177e4
LT
2718 * becomes Quiescent.
2719 */
2720 val64 = readq(&bar0->pcc_err_reg);
2721 writeq(val64, &bar0->pcc_err_reg);
2722 if (val64 & PCC_FB_ECC_DB_ERR) {
2723 u64 ac = readq(&bar0->adapter_control);
2724 ac &= ~(ADAPTER_CNTL_EN);
2725 writeq(ac, &bar0->adapter_control);
2726 ac = readq(&bar0->adapter_control);
2727 schedule_work(&nic->set_link_task);
2728 }
2729
2730 /* Other type of interrupts are not being handled now, TODO */
2731}
2732
20346722 2733/**
1da177e4 2734 * wait_for_cmd_complete - waits for a command to complete.
20346722 2735 * @sp : private member of the device structure, which is a pointer to the
1da177e4 2736 * s2io_nic structure.
20346722
K
2737 * Description: Waits for a command write into the RMAC ADDR DATA
2738 * registers to complete and returns either success or error
2739 * depending on whether the command completed or not.
1da177e4
LT
2740 * Return value:
2741 * SUCCESS on success and FAILURE on failure.
2742 */
2743
20346722 2744int wait_for_cmd_complete(nic_t * sp)
1da177e4
LT
2745{
2746 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2747 int ret = FAILURE, cnt = 0;
2748 u64 val64;
2749
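/*
 * Poll the strobe bit roughly every 50 ms, giving up after about half
 * a second if the command has not completed.
 */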
2750 while (TRUE) {
2751 val64 = readq(&bar0->rmac_addr_cmd_mem);
2752 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2753 ret = SUCCESS;
2754 break;
2755 }
2756 msleep(50);
2757 if (cnt++ > 10)
2758 break;
2759 }
2760
2761 return ret;
2762}
2763
20346722
K
2764/**
2765 * s2io_reset - Resets the card.
1da177e4
LT
2766 * @sp : private member of the device structure.
2767 * Description: Function to Reset the card. This function then also
20346722 2768 * restores the previously saved PCI configuration space registers as
1da177e4
LT
2769 * the card reset also resets the configuration space.
2770 * Return value:
2771 * void.
2772 */
2773
20346722 2774void s2io_reset(nic_t * sp)
1da177e4
LT
2775{
2776 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2777 u64 val64;
5e25b9dd 2778 u16 subid, pci_cmd;
1da177e4 2779
0b1f7ebe 2780 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
e960fc5c 2781 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
0b1f7ebe 2782
1da177e4
LT
2783 val64 = SW_RESET_ALL;
2784 writeq(val64, &bar0->sw_reset);
2785
20346722
K
2786 /*
2787 * At this stage, if the PCI write is indeed completed, the
2788 * card is reset and so is the PCI Config space of the device.
2789 * So a read cannot be issued at this stage on any of the
1da177e4
LT
2790 * registers to ensure the write into "sw_reset" register
2791 * has gone through.
2792 * Question: Is there any system call that will explicitly force
2793 * all the write commands still pending on the bus to be pushed
2794 * through?
2795 * As of now I am just giving a 250ms delay and hoping that the
2796 * PCI write to sw_reset register is done by this time.
2797 */
2798 msleep(250);
2799
e960fc5c 2800 /* Restore the PCI state saved during initialization. */
2801 pci_restore_state(sp->pdev);
2802 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
0b1f7ebe 2803 pci_cmd);
1da177e4
LT
2804 s2io_init_pci(sp);
2805
2806 msleep(250);
2807
20346722
K
2808 /* Set swapper to enable I/O register access */
2809 s2io_set_swapper(sp);
2810
5e25b9dd 2811 /* Clear certain PCI/PCI-X fields after reset */
303bcb4b
K
2812 if (sp->device_type == XFRAME_II_DEVICE) {
2813 /* Clear parity err detect bit */
2814 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
5e25b9dd 2815
303bcb4b
K
2816 /* Clearing PCIX Ecc status register */
2817 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
5e25b9dd 2818
303bcb4b
K
2819 /* Clearing PCI_STATUS error reflected here */
2820 writeq(BIT(62), &bar0->txpic_int_reg);
2821 }
5e25b9dd 2822
20346722
K
2823 /* Reset device statistics maintained by OS */
2824 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2825
1da177e4
LT
2826 /* SXE-002: Configure link and activity LED to turn it off */
2827 subid = sp->pdev->subsystem_device;
541ae68f
K
2828 if (((subid & 0xFF) >= 0x07) &&
2829 (sp->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
2830 val64 = readq(&bar0->gpio_control);
2831 val64 |= 0x0000800000000000ULL;
2832 writeq(val64, &bar0->gpio_control);
2833 val64 = 0x0411040400000000ULL;
20346722 2834 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1da177e4
LT
2835 }
2836
541ae68f
K
2837 /*
2838 * Clear spurious ECC interrupts that would have occured on
2839 * XFRAME II cards after reset.
2840 */
2841 if (sp->device_type == XFRAME_II_DEVICE) {
2842 val64 = readq(&bar0->pcc_err_reg);
2843 writeq(val64, &bar0->pcc_err_reg);
2844 }
2845
1da177e4
LT
2846 sp->device_enabled_once = FALSE;
2847}
2848
2849/**
20346722
K
2850 * s2io_set_swapper - to set the swapper control on the card
2851 * @sp : private member of the device structure,
1da177e4 2852 * pointer to the s2io_nic structure.
20346722 2853 * Description: Function to set the swapper control on the card
1da177e4
LT
2854 * correctly depending on the 'endianness' of the system.
2855 * Return value:
2856 * SUCCESS on success and FAILURE on failure.
2857 */
2858
20346722 2859int s2io_set_swapper(nic_t * sp)
1da177e4
LT
2860{
2861 struct net_device *dev = sp->dev;
2862 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2863 u64 val64, valt, valr;
2864
20346722 2865 /*
1da177e4
LT
2866 * Set proper endian settings and verify the same by reading
2867 * the PIF Feed-back register.
2868 */
2869
2870 val64 = readq(&bar0->pif_rd_swapper_fb);
2871 if (val64 != 0x0123456789ABCDEFULL) {
2872 int i = 0;
2873 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2874 0x8100008181000081ULL, /* FE=1, SE=0 */
2875 0x4200004242000042ULL, /* FE=0, SE=1 */
2876 0}; /* FE=0, SE=0 */
2877
2878 while(i<4) {
2879 writeq(value[i], &bar0->swapper_ctrl);
2880 val64 = readq(&bar0->pif_rd_swapper_fb);
2881 if (val64 == 0x0123456789ABCDEFULL)
2882 break;
2883 i++;
2884 }
2885 if (i == 4) {
2886 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2887 dev->name);
2888 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2889 (unsigned long long) val64);
2890 return FAILURE;
2891 }
2892 valr = value[i];
2893 } else {
2894 valr = readq(&bar0->swapper_ctrl);
2895 }
2896
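/*
 * Second check: verify the write path by writing a known pattern to the
 * XMSI address register and reading it back, retrying with each
 * candidate swapper setting ORed into the value found above.
 */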
2897 valt = 0x0123456789ABCDEFULL;
2898 writeq(valt, &bar0->xmsi_address);
2899 val64 = readq(&bar0->xmsi_address);
2900
2901 if(val64 != valt) {
2902 int i = 0;
2903 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2904 0x0081810000818100ULL, /* FE=1, SE=0 */
2905 0x0042420000424200ULL, /* FE=0, SE=1 */
2906 0}; /* FE=0, SE=0 */
2907
2908 while(i<4) {
2909 writeq((value[i] | valr), &bar0->swapper_ctrl);
2910 writeq(valt, &bar0->xmsi_address);
2911 val64 = readq(&bar0->xmsi_address);
2912 if(val64 == valt)
2913 break;
2914 i++;
2915 }
2916 if(i == 4) {
20346722 2917 unsigned long long x = val64;
1da177e4 2918 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
20346722 2919 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
1da177e4
LT
2920 return FAILURE;
2921 }
2922 }
2923 val64 = readq(&bar0->swapper_ctrl);
2924 val64 &= 0xFFFF000000000000ULL;
2925
2926#ifdef __BIG_ENDIAN
20346722
K
2927 /*
2928 * The device by default set to a big endian format, so a
1da177e4
LT
2929 * big endian driver need not set anything.
2930 */
2931 val64 |= (SWAPPER_CTRL_TXP_FE |
2932 SWAPPER_CTRL_TXP_SE |
2933 SWAPPER_CTRL_TXD_R_FE |
2934 SWAPPER_CTRL_TXD_W_FE |
2935 SWAPPER_CTRL_TXF_R_FE |
2936 SWAPPER_CTRL_RXD_R_FE |
2937 SWAPPER_CTRL_RXD_W_FE |
2938 SWAPPER_CTRL_RXF_W_FE |
2939 SWAPPER_CTRL_XMSI_FE |
2940 SWAPPER_CTRL_XMSI_SE |
2941 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2942 writeq(val64, &bar0->swapper_ctrl);
2943#else
20346722 2944 /*
1da177e4 2945 * Initially we enable all bits to make it accessible by the
20346722 2946 * driver, then we selectively enable only those bits that
1da177e4
LT
2947 * we want to set.
2948 */
2949 val64 |= (SWAPPER_CTRL_TXP_FE |
2950 SWAPPER_CTRL_TXP_SE |
2951 SWAPPER_CTRL_TXD_R_FE |
2952 SWAPPER_CTRL_TXD_R_SE |
2953 SWAPPER_CTRL_TXD_W_FE |
2954 SWAPPER_CTRL_TXD_W_SE |
2955 SWAPPER_CTRL_TXF_R_FE |
2956 SWAPPER_CTRL_RXD_R_FE |
2957 SWAPPER_CTRL_RXD_R_SE |
2958 SWAPPER_CTRL_RXD_W_FE |
2959 SWAPPER_CTRL_RXD_W_SE |
2960 SWAPPER_CTRL_RXF_W_FE |
2961 SWAPPER_CTRL_XMSI_FE |
2962 SWAPPER_CTRL_XMSI_SE |
2963 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2964 writeq(val64, &bar0->swapper_ctrl);
2965#endif
2966 val64 = readq(&bar0->swapper_ctrl);
2967
20346722
K
2968 /*
2969 * Verifying if endian settings are accurate by reading a
1da177e4
LT
2970 * feedback register.
2971 */
2972 val64 = readq(&bar0->pif_rd_swapper_fb);
2973 if (val64 != 0x0123456789ABCDEFULL) {
2974 /* Endian settings are incorrect, calls for another dekko. */
2975 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2976 dev->name);
2977 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2978 (unsigned long long) val64);
2979 return FAILURE;
2980 }
2981
2982 return SUCCESS;
2983}
2984
2985/* ********************************************************* *
2986 * Functions defined below concern the OS part of the driver *
2987 * ********************************************************* */
2988
20346722 2989/**
1da177e4
LT
2990 * s2io_open - open entry point of the driver
2991 * @dev : pointer to the device structure.
2992 * Description:
2993 * This function is the open entry point of the driver. It mainly calls a
2994 * function to allocate Rx buffers and inserts them into the buffer
20346722 2995 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
2996 * Return value:
2997 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2998 * file on failure.
2999 */
3000
20346722 3001int s2io_open(struct net_device *dev)
1da177e4
LT
3002{
3003 nic_t *sp = dev->priv;
3004 int err = 0;
3005
20346722
K
3006 /*
3007 * Make sure you have link off by default every time
1da177e4
LT
3008 * the NIC is initialized
3009 */
3010 netif_carrier_off(dev);
0b1f7ebe 3011 sp->last_link_state = 0;
1da177e4
LT
3012
3013 /* Initialize H/W and enable interrupts */
3014 if (s2io_card_up(sp)) {
3015 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3016 dev->name);
20346722
K
3017 err = -ENODEV;
3018 goto hw_init_failed;
1da177e4
LT
3019 }
3020
3021 /* After proper initialization of H/W, register ISR */
20346722 3022 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
1da177e4
LT
3023 sp->name, dev);
3024 if (err) {
1da177e4
LT
3025 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3026 dev->name);
20346722 3027 goto isr_registration_failed;
1da177e4
LT
3028 }
3029
3030 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3031 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
20346722
K
3032 err = -ENODEV;
3033 goto setting_mac_address_failed;
1da177e4
LT
3034 }
3035
3036 netif_start_queue(dev);
3037 return 0;
20346722
K
3038
3039setting_mac_address_failed:
3040 free_irq(sp->pdev->irq, dev);
3041isr_registration_failed:
25fff88e 3042 del_timer_sync(&sp->alarm_timer);
20346722
K
3043 s2io_reset(sp);
3044hw_init_failed:
3045 return err;
1da177e4
LT
3046}
3047
3048/**
3049 * s2io_close -close entry point of the driver
3050 * @dev : device pointer.
3051 * Description:
3052 * This is the stop entry point of the driver. It needs to undo exactly
3053 * whatever was done by the open entry point, thus it's usually referred to
3054 * as the close function. Among other things, this function mainly stops the
3055 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3056 * Return value:
3057 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3058 * file on failure.
3059 */
3060
20346722 3061int s2io_close(struct net_device *dev)
1da177e4
LT
3062{
3063 nic_t *sp = dev->priv;
1da177e4
LT
3064 flush_scheduled_work();
3065 netif_stop_queue(dev);
3066 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3067 s2io_card_down(sp);
3068
20346722 3069 free_irq(sp->pdev->irq, dev);
1da177e4
LT
3070 sp->device_close_flag = TRUE; /* Device is shut down. */
3071 return 0;
3072}
3073
3074/**
3075 * s2io_xmit - Tx entry point of the driver
3076 * @skb : the socket buffer containing the Tx data.
3077 * @dev : device pointer.
3078 * Description :
3079 * This function is the Tx entry point of the driver. S2IO NIC supports
3080 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3081 * NOTE: when the device can't queue the pkt, just the trans_start variable
3082 * will not be updated.
3083 * Return value:
3084 * 0 on success & 1 on failure.
3085 */
3086
20346722 3087int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
3088{
3089 nic_t *sp = dev->priv;
3090 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3091 register u64 val64;
3092 TxD_t *txdp;
3093 TxFIFO_element_t __iomem *tx_fifo;
3094 unsigned long flags;
3095#ifdef NETIF_F_TSO
3096 int mss;
3097#endif
be3a6b02
K
3098 u16 vlan_tag = 0;
3099 int vlan_priority = 0;
1da177e4
LT
3100 mac_info_t *mac_control;
3101 struct config_param *config;
1da177e4
LT
3102
3103 mac_control = &sp->mac_control;
3104 config = &sp->config;
3105
20346722 3106 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3107 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3108 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3109 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4
LT
3110 dev->name);
3111 spin_unlock_irqrestore(&sp->tx_lock, flags);
20346722
K
3112 dev_kfree_skb(skb);
3113 return 0;
1da177e4
LT
3114 }
3115
3116 queue = 0;
1da177e4 3117
be3a6b02
K
3118 /* Get Fifo number to Transmit based on vlan priority */
3119 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3120 vlan_tag = vlan_tx_tag_get(skb);
3121 vlan_priority = vlan_tag >> 13;
3122 queue = config->fifo_mapping[vlan_priority];
3123 }
3124
20346722
K
3125 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3126 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3127 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3128 list_virt_addr;
3129
3130 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
1da177e4
LT
3131 /* Avoid "put" pointer going beyond "get" pointer */
3132 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3133 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3134 netif_stop_queue(dev);
3135 dev_kfree_skb(skb);
3136 spin_unlock_irqrestore(&sp->tx_lock, flags);
3137 return 0;
3138 }
0b1f7ebe
K
3139
3140 /* A buffer with no data will be dropped */
3141 if (!skb->len) {
3142 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3143 dev_kfree_skb(skb);
3144 spin_unlock_irqrestore(&sp->tx_lock, flags);
3145 return 0;
3146 }
3147
1da177e4
LT
3148#ifdef NETIF_F_TSO
3149 mss = skb_shinfo(skb)->tso_size;
3150 if (mss) {
3151 txdp->Control_1 |= TXD_TCP_LSO_EN;
3152 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3153 }
3154#endif
3155
3156 frg_cnt = skb_shinfo(skb)->nr_frags;
3157 frg_len = skb->len - skb->data_len;
3158
1da177e4
LT
3159 txdp->Buffer_Pointer = pci_map_single
3160 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
20346722 3161 txdp->Host_Control = (unsigned long) skb;
1da177e4
LT
3162 if (skb->ip_summed == CHECKSUM_HW) {
3163 txdp->Control_2 |=
3164 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3165 TXD_TX_CKO_UDP_EN);
3166 }
3167
3168 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3169
be3a6b02
K
3170 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3171 txdp->Control_2 |= TXD_VLAN_ENABLE;
3172 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3173 }
3174
1da177e4
LT
3175 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3176 TXD_GATHER_CODE_FIRST);
3177 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3178
3179 /* For fragmented SKB. */
3180 for (i = 0; i < frg_cnt; i++) {
3181 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe
K
3182 /* A '0' length fragment will be ignored */
3183 if (!frag->size)
3184 continue;
1da177e4
LT
3185 txdp++;
3186 txdp->Buffer_Pointer = (u64) pci_map_page
3187 (sp->pdev, frag->page, frag->page_offset,
3188 frag->size, PCI_DMA_TODEVICE);
3189 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3190 }
3191 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3192
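/*
 * Post the TxDL to the NIC: write the physical address of the
 * descriptor list into the FIFO's TxDL_Pointer register, then ring the
 * List_Control doorbell with the first/last-list and TxD count bits.
 */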
3193 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3194 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
1da177e4
LT
3195 writeq(val64, &tx_fifo->TxDL_Pointer);
3196
3197 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3198 TX_FIFO_LAST_LIST);
20346722 3199
1da177e4
LT
3200#ifdef NETIF_F_TSO
3201 if (mss)
3202 val64 |= TX_FIFO_SPECIAL_FUNC;
3203#endif
3204 writeq(val64, &tx_fifo->List_Control);
3205
303bcb4b
K
3206 mmiowb();
3207
1da177e4 3208 put_off++;
20346722
K
3209 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3210 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
1da177e4
LT
3211
3212 /* Avoid "put" pointer going beyond "get" pointer */
3213 if (((put_off + 1) % queue_len) == get_off) {
3214 DBG_PRINT(TX_DBG,
3215 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3216 put_off, get_off);
3217 netif_stop_queue(dev);
3218 }
3219
3220 dev->trans_start = jiffies;
3221 spin_unlock_irqrestore(&sp->tx_lock, flags);
3222
3223 return 0;
3224}
3225
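/*
 * s2io_alarm_handle - timer callback that runs the alarm interrupt
 * handler and re-arms itself every half a second.
 */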
25fff88e
K
3226static void
3227s2io_alarm_handle(unsigned long data)
3228{
3229 nic_t *sp = (nic_t *)data;
3230
3231 alarm_intr_handler(sp);
3232 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3233}
3234
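/*
 * s2io_txpic_intr_handle - services TXPIC/GPIO interrupts. When the
 * GPIO interrupt register reports a link up/down transition, the link
 * interrupts are masked and s2io_set_link() is invoked; finally only
 * the interrupt for the opposite of the current link state is left
 * unmasked.
 */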
a371a07d
K
3235static void s2io_txpic_intr_handle(nic_t *sp)
3236{
3237 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
3238 u64 val64;
3239
3240 val64 = readq(&bar0->pic_int_status);
3241 if (val64 & PIC_INT_GPIO) {
3242 val64 = readq(&bar0->gpio_int_reg);
3243 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3244 (val64 & GPIO_INT_REG_LINK_UP)) {
3245 val64 |= GPIO_INT_REG_LINK_DOWN;
3246 val64 |= GPIO_INT_REG_LINK_UP;
3247 writeq(val64, &bar0->gpio_int_reg);
3248 goto masking;
3249 }
3250
3251 if (((sp->last_link_state == LINK_UP) &&
3252 (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3253 ((sp->last_link_state == LINK_DOWN) &&
3254 (val64 & GPIO_INT_REG_LINK_UP))) {
3255 val64 = readq(&bar0->gpio_int_mask);
3256 val64 |= GPIO_INT_MASK_LINK_DOWN;
3257 val64 |= GPIO_INT_MASK_LINK_UP;
3258 writeq(val64, &bar0->gpio_int_mask);
3259 s2io_set_link((unsigned long)sp);
3260 }
3261masking:
3262 if (sp->last_link_state == LINK_UP) {
3263 /*enable down interrupt */
3264 val64 = readq(&bar0->gpio_int_mask);
3265 /* unmasks link down intr */
3266 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3267 /* masks link up intr */
3268 val64 |= GPIO_INT_MASK_LINK_UP;
3269 writeq(val64, &bar0->gpio_int_mask);
3270 } else {
3271 /*enable UP Interrupt */
3272 val64 = readq(&bar0->gpio_int_mask);
3273 /* unmasks link up interrupt */
3274 val64 &= ~GPIO_INT_MASK_LINK_UP;
3275 /* masks link down interrupt */
3276 val64 |= GPIO_INT_MASK_LINK_DOWN;
3277 writeq(val64, &bar0->gpio_int_mask);
3278 }
3279 }
3280}
3281
1da177e4
LT
3282/**
3283 * s2io_isr - ISR handler of the device .
3284 * @irq: the irq of the device.
3285 * @dev_id: a void pointer to the dev structure of the NIC.
3286 * @pt_regs: pointer to the registers pushed on the stack.
20346722
K
3287 * Description: This function is the ISR handler of the device. It
3288 * identifies the reason for the interrupt and calls the relevant
3289 * service routines. As a contingency measure, this ISR allocates the
1da177e4
LT
3290 * recv buffers, if their numbers are below the panic value which is
3291 * presently set to 25% of the original number of rcv buffers allocated.
3292 * Return value:
20346722 3293 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
3294 * IRQ_NONE: will be returned if interrupt is not from our device
3295 */
3296static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3297{
3298 struct net_device *dev = (struct net_device *) dev_id;
3299 nic_t *sp = dev->priv;
3300 XENA_dev_config_t __iomem *bar0 = sp->bar0;
20346722 3301 int i;
fe113638 3302 u64 reason = 0, val64;
1da177e4
LT
3303 mac_info_t *mac_control;
3304 struct config_param *config;
3305
7ba013ac 3306 atomic_inc(&sp->isr_cnt);
1da177e4
LT
3307 mac_control = &sp->mac_control;
3308 config = &sp->config;
3309
20346722 3310 /*
1da177e4
LT
3311 * Identify the cause for interrupt and call the appropriate
3312 * interrupt handler. Causes for the interrupt could be:
3313 * 1. Rx of packet.
3314 * 2. Tx complete.
3315 * 3. Link down.
20346722 3316 * 4. Error in any functional blocks of the NIC.
1da177e4
LT
3317 */
3318 reason = readq(&bar0->general_int_status);
3319
3320 if (!reason) {
3321 /* The interrupt was not raised by Xena. */
7ba013ac 3322 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3323 return IRQ_NONE;
3324 }
3325
1da177e4
LT
3326#ifdef CONFIG_S2IO_NAPI
3327 if (reason & GEN_INTR_RXTRAFFIC) {
3328 if (netif_rx_schedule_prep(dev)) {
3329 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3330 DISABLE_INTRS);
3331 __netif_rx_schedule(dev);
3332 }
3333 }
3334#else
3335 /* If Intr is because of Rx Traffic */
3336 if (reason & GEN_INTR_RXTRAFFIC) {
fe113638
K
3337 /*
3338 * rx_traffic_int reg is an R1 register, writing all 1's
3339 * will ensure that the actual interrupt causing bit gets
3340 * cleared and hence a read can be avoided.
3341 */
3342 val64 = 0xFFFFFFFFFFFFFFFFULL;
3343 writeq(val64, &bar0->rx_traffic_int);
20346722
K
3344 for (i = 0; i < config->rx_ring_num; i++) {
3345 rx_intr_handler(&mac_control->rings[i]);
3346 }
1da177e4
LT
3347 }
3348#endif
3349
20346722
K
3350 /* If Intr is because of Tx Traffic */
3351 if (reason & GEN_INTR_TXTRAFFIC) {
fe113638
K
3352 /*
3353 * tx_traffic_int reg is an R1 register, writing all 1's
3354 * will ensure that the actual interrupt causing bit gets
3355 * cleared and hence a read can be avoided.
3356 */
3357 val64 = 0xFFFFFFFFFFFFFFFFULL;
3358 writeq(val64, &bar0->tx_traffic_int);
3359
20346722
K
3360 for (i = 0; i < config->tx_fifo_num; i++)
3361 tx_intr_handler(&mac_control->fifos[i]);
3362 }
3363
a371a07d
K
3364 if (reason & GEN_INTR_TXPIC)
3365 s2io_txpic_intr_handle(sp);
20346722
K
3366 /*
3367 * If the Rx buffer count is below the panic threshold then
3368 * reallocate the buffers from the interrupt handler itself,
1da177e4
LT
3369 * else schedule a tasklet to reallocate the buffers.
3370 */
3371#ifndef CONFIG_S2IO_NAPI
3372 for (i = 0; i < config->rx_ring_num; i++) {
20346722 3373 int ret;
1da177e4
LT
3374 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3375 int level = rx_buffer_level(sp, rxb_size, i);
3376
3377 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3378 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3379 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3380 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3381 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3382 dev->name);
3383 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3384 clear_bit(0, (&sp->tasklet_status));
7ba013ac 3385 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3386 return IRQ_HANDLED;
3387 }
3388 clear_bit(0, (&sp->tasklet_status));
3389 } else if (level == LOW) {
3390 tasklet_schedule(&sp->task);
3391 }
3392 }
3393#endif
3394
7ba013ac 3395 atomic_dec(&sp->isr_cnt);
1da177e4
LT
3396 return IRQ_HANDLED;
3397}
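/*
 * Illustrative sketch (not part of the original file): a handler with the
 * above signature is normally registered from the driver's open path with
 * request_irq(), roughly as below.  The flag and name arguments are
 * assumptions for illustration; the driver's actual open routine may differ.
 *
 *	err = request_irq(dev->irq, s2io_isr, SA_SHIRQ, dev->name, dev);
 *	if (err) {
 *		DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", dev->name);
 *		return err;
 *	}
 *
 * The dev_id passed at registration time is what s2io_isr() casts back to a
 * net_device pointer at the top of the function.
 */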
3398
7ba013ac
K
3399/**
3400 * s2io_updt_stats - Triggers a one-shot update of the hardware statistics block.
3401 */
3402static void s2io_updt_stats(nic_t *sp)
3403{
3404 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3405 u64 val64;
3406 int cnt = 0;
3407
3408 if (atomic_read(&sp->card_state) == CARD_UP) {
3409 /* Apprx 30us on a 133 MHz bus */
3410 val64 = SET_UPDT_CLICKS(10) |
3411 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3412 writeq(val64, &bar0->stat_cfg);
3413 do {
3414 udelay(100);
3415 val64 = readq(&bar0->stat_cfg);
3416 if (!(val64 & BIT(0)))
3417 break;
3418 cnt++;
3419 if (cnt == 5)
3420 break; /* Updt failed */
3421 } while(1);
3422 }
3423}
3424
1da177e4 3425/**
20346722 3426 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
3427 * @dev : pointer to the device structure.
3428 * Description:
20346722 3429 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
3430 * structure and returns a pointer to the same.
3431 * Return value:
3432 * pointer to the updated net_device_stats structure.
3433 */
3434
20346722 3435struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4
LT
3436{
3437 nic_t *sp = dev->priv;
3438 mac_info_t *mac_control;
3439 struct config_param *config;
3440
20346722 3441
1da177e4
LT
3442 mac_control = &sp->mac_control;
3443 config = &sp->config;
3444
7ba013ac
K
3445 /* Configure Stats for immediate updt */
3446 s2io_updt_stats(sp);
3447
3448 sp->stats.tx_packets =
3449 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722
K
3450 sp->stats.tx_errors =
3451 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3452 sp->stats.rx_errors =
3453 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3454 sp->stats.multicast =
3455 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 3456 sp->stats.rx_length_errors =
20346722 3457 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
3458
3459 return (&sp->stats);
3460}
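/*
 * Note (assumption, for illustration only): the kernel reaches this routine
 * through the dev->get_stats hook, which the probe path is expected to set
 * up along the lines of:
 *
 *	dev->get_stats = &s2io_get_stats;
 *
 * so that "ifconfig" or /proc/net/dev report the freshly updated hardware
 * counters gathered by s2io_updt_stats() above.
 */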
3461
3462/**
3463 * s2io_set_multicast - entry point for multicast address enable/disable.
3464 * @dev : pointer to the device structure
3465 * Description:
20346722
K
3466 * This function is a driver entry point which gets called by the kernel
3467 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
3468 * called to set/reset promiscuous mode. Depending on the device flags, we
3469 * determine whether multicast addresses must be enabled or promiscuous
3470 * mode is to be disabled, etc.
3471 * Return value:
3472 * void.
3473 */
3474
3475static void s2io_set_multicast(struct net_device *dev)
3476{
3477 int i, j, prev_cnt;
3478 struct dev_mc_list *mclist;
3479 nic_t *sp = dev->priv;
3480 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3481 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3482 0xfeffffffffffULL;
3483 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3484 void __iomem *add;
3485
3486 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3487 /* Enable all Multicast addresses */
3488 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3489 &bar0->rmac_addr_data0_mem);
3490 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3491 &bar0->rmac_addr_data1_mem);
3492 val64 = RMAC_ADDR_CMD_MEM_WE |
3493 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3494 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3495 writeq(val64, &bar0->rmac_addr_cmd_mem);
3496 /* Wait till command completes */
3497 wait_for_cmd_complete(sp);
3498
3499 sp->m_cast_flg = 1;
3500 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3501 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3502 /* Disable all Multicast addresses */
3503 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3504 &bar0->rmac_addr_data0_mem);
5e25b9dd
K
3505 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3506 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3507 val64 = RMAC_ADDR_CMD_MEM_WE |
3508 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3509 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3510 writeq(val64, &bar0->rmac_addr_cmd_mem);
3511 /* Wait till command completes */
3512 wait_for_cmd_complete(sp);
3513
3514 sp->m_cast_flg = 0;
3515 sp->all_multi_pos = 0;
3516 }
3517
3518 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3519 /* Put the NIC into promiscuous mode */
3520 add = &bar0->mac_cfg;
3521 val64 = readq(&bar0->mac_cfg);
3522 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3523
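		/*
		 * mac_cfg is a 64-bit register that is written 32 bits at a
		 * time with writel(); the rmac_cfg_key register is re-armed
		 * with RMAC_CFG_KEY(0x4C0D) before each half is written.
		 */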
3524 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3525 writel((u32) val64, add);
3526 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3527 writel((u32) (val64 >> 32), (add + 4));
3528
3529 val64 = readq(&bar0->mac_cfg);
3530 sp->promisc_flg = 1;
3531 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3532 dev->name);
3533 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3534 /* Remove the NIC from promiscuous mode */
3535 add = &bar0->mac_cfg;
3536 val64 = readq(&bar0->mac_cfg);
3537 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3538
3539 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3540 writel((u32) val64, add);
3541 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3542 writel((u32) (val64 >> 32), (add + 4));
3543
3544 val64 = readq(&bar0->mac_cfg);
3545 sp->promisc_flg = 0;
3546 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3547 dev->name);
3548 }
3549
3550 /* Update individual M_CAST address list */
3551 if ((!sp->m_cast_flg) && dev->mc_count) {
3552 if (dev->mc_count >
3553 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3554 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3555 dev->name);
3556 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3557 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3558 return;
3559 }
3560
3561 prev_cnt = sp->mc_addr_count;
3562 sp->mc_addr_count = dev->mc_count;
3563
3564 /* Clear out the previous list of Mc in the H/W. */
3565 for (i = 0; i < prev_cnt; i++) {
3566 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3567 &bar0->rmac_addr_data0_mem);
3568 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3569 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3570 val64 = RMAC_ADDR_CMD_MEM_WE |
3571 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3572 RMAC_ADDR_CMD_MEM_OFFSET
3573 (MAC_MC_ADDR_START_OFFSET + i);
3574 writeq(val64, &bar0->rmac_addr_cmd_mem);
3575
3576 /* Wait till command completes */
3577 if (wait_for_cmd_complete(sp)) {
3578 DBG_PRINT(ERR_DBG, "%s: Adding ",
3579 dev->name);
3580 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3581 return;
3582 }
3583 }
3584
3585 /* Create the new Rx filter list and update the same in H/W. */
3586 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3587 i++, mclist = mclist->next) {
3588 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3589 ETH_ALEN);
3590 for (j = 0; j < ETH_ALEN; j++) {
3591 mac_addr |= mclist->dmi_addr[j];
3592 mac_addr <<= 8;
3593 }
3594 mac_addr >>= 8;
3595 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3596 &bar0->rmac_addr_data0_mem);
3597 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 3598 &bar0->rmac_addr_data1_mem);
1da177e4
LT
3599 val64 = RMAC_ADDR_CMD_MEM_WE |
3600 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3601 RMAC_ADDR_CMD_MEM_OFFSET
3602 (i + MAC_MC_ADDR_START_OFFSET);
3603 writeq(val64, &bar0->rmac_addr_cmd_mem);
3604
3606 /* Wait till command completes */
3606 if (wait_for_cmd_complete(sp)) {
3607 DBG_PRINT(ERR_DBG, "%s: Adding ",
3608 dev->name);
3609 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3610 return;
3611 }
3612 }
3613 }
3614}
3615
3616/**
20346722 3617 * s2io_set_mac_addr - Programs the Xframe mac address
1da177e4
LT
3618 * @dev : pointer to the device structure.
3619 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 3620 * Description : This procedure will program the Xframe to receive
1da177e4 3621 * frames with the new MAC address.
20346722 3622 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
3623 * as defined in errno.h file on failure.
3624 */
3625
3626int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3627{
3628 nic_t *sp = dev->priv;
3629 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3630 register u64 val64, mac_addr = 0;
3631 int i;
3632
20346722 3633 /*
1da177e4
LT
3634 * Set the new MAC address as the new unicast filter and reflect this
3635 * change on the device address registered with the OS. It will be
20346722 3636 * at offset 0.
1da177e4
LT
3637 */
3638 for (i = 0; i < ETH_ALEN; i++) {
3639 mac_addr <<= 8;
3640 mac_addr |= addr[i];
3641 }
3642
3643 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3644 &bar0->rmac_addr_data0_mem);
3645
3646 val64 =
3647 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3648 RMAC_ADDR_CMD_MEM_OFFSET(0);
3649 writeq(val64, &bar0->rmac_addr_cmd_mem);
3650 /* Wait till command completes */
3651 if (wait_for_cmd_complete(sp)) {
3652 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3653 return FAILURE;
3654 }
3655
3656 return SUCCESS;
3657}
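/*
 * Illustrative sketch only (not from the original file): a hypothetical
 * dev->set_mac_address style wrapper would unpack the struct sockaddr
 * handed down by the core before calling s2io_set_mac_addr(), e.g.
 *
 *	static int s2io_ndo_set_mac(struct net_device *dev, void *p)
 *	{
 *		struct sockaddr *addr = p;
 *
 *		if (s2io_set_mac_addr(dev, addr->sa_data) == FAILURE)
 *			return -EIO;
 *		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 *		return 0;
 *	}
 *
 * The wrapper name and error code above are assumptions for illustration.
 */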
3658
3659/**
20346722 3660 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
3661 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3662 * @info: pointer to the structure with parameters given by ethtool to set
3663 * link information.
3664 * Description:
20346722 3665 * The function sets different link parameters provided by the user onto
1da177e4
LT
3666 * the NIC.
3667 * Return value:
3668 * 0 on success.
3669*/
3670
3671static int s2io_ethtool_sset(struct net_device *dev,
3672 struct ethtool_cmd *info)
3673{
3674 nic_t *sp = dev->priv;
3675 if ((info->autoneg == AUTONEG_ENABLE) ||
3676 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3677 return -EINVAL;
3678 else {
3679 s2io_close(sp->dev);
3680 s2io_open(sp->dev);
3681 }
3682
3683 return 0;
3684}
3685
3686/**
20346722 3687 * s2io_ethtool_gset - Return link specific information.
1da177e4
LT
3688 * @sp : private member of the device structure, pointer to the
3689 * s2io_nic structure.
3690 * @info : pointer to the structure with parameters given by ethtool
3691 * to return link information.
3692 * Description:
3693 * Returns link specific information like speed, duplex etc.. to ethtool.
3694 * Return value :
3695 * return 0 on success.
3696 */
3697
3698static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3699{
3700 nic_t *sp = dev->priv;
3701 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3702 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3703 info->port = PORT_FIBRE;
3704 /* info->transceiver?? TODO */
3705
3706 if (netif_carrier_ok(sp->dev)) {
3707 info->speed = 10000;
3708 info->duplex = DUPLEX_FULL;
3709 } else {
3710 info->speed = -1;
3711 info->duplex = -1;
3712 }
3713
3714 info->autoneg = AUTONEG_DISABLE;
3715 return 0;
3716}
3717
3718/**
20346722
K
3719 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3720 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3721 * s2io_nic structure.
3722 * @info : pointer to the structure with parameters given by ethtool to
3723 * return driver information.
3724 * Description:
3725 * Returns driver specific information like name, version etc. to ethtool.
3726 * Return value:
3727 * void
3728 */
3729
3730static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3731 struct ethtool_drvinfo *info)
3732{
3733 nic_t *sp = dev->priv;
3734
3735 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3736 strncpy(info->version, s2io_driver_version,
3737 sizeof(s2io_driver_version));
3738 strncpy(info->fw_version, "", 32);
3739 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3740 info->regdump_len = XENA_REG_SPACE;
3741 info->eedump_len = XENA_EEPROM_SPACE;
3742 info->testinfo_len = S2IO_TEST_LEN;
3743 info->n_stats = S2IO_STAT_LEN;
3744}
3745
3746/**
3747 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
20346722 3748 * @sp: private member of the device structure, which is a pointer to the
1da177e4 3749 * s2io_nic structure.
20346722 3750 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
3751 * dumping the registers.
3752 * @reg_space: The input argument into which all the registers are dumped.
3753 * Description:
3754 * Dumps the entire register space of the Xframe NIC into the user-given
3755 * buffer area.
3756 * Return value :
3757 * void .
3758*/
3759
3760static void s2io_ethtool_gregs(struct net_device *dev,
3761 struct ethtool_regs *regs, void *space)
3762{
3763 int i;
3764 u64 reg;
3765 u8 *reg_space = (u8 *) space;
3766 nic_t *sp = dev->priv;
3767
3768 regs->len = XENA_REG_SPACE;
3769 regs->version = sp->pdev->subsystem_device;
3770
3771 for (i = 0; i < regs->len; i += 8) {
3772 reg = readq(sp->bar0 + i);
3773 memcpy((reg_space + i), &reg, 8);
3774 }
3775}
3776
3777/**
3778 * s2io_phy_id - timer function that alternates adapter LED.
20346722 3779 * @data : address of the private member of the device structure, which
1da177e4 3780 * is a pointer to the s2io_nic structure, provided as an u32.
20346722
K
3781 * Description: This is actually the timer function that alternates the
3782 * adapter LED bit in the adapter control register on every
3783 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
1da177e4
LT
3784 * once every second.
3785*/
3786static void s2io_phy_id(unsigned long data)
3787{
3788 nic_t *sp = (nic_t *) data;
3789 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3790 u64 val64 = 0;
3791 u16 subid;
3792
3793 subid = sp->pdev->subsystem_device;
541ae68f
K
3794 if ((sp->device_type == XFRAME_II_DEVICE) ||
3795 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
3796 val64 = readq(&bar0->gpio_control);
3797 val64 ^= GPIO_CTRL_GPIO_0;
3798 writeq(val64, &bar0->gpio_control);
3799 } else {
3800 val64 = readq(&bar0->adapter_control);
3801 val64 ^= ADAPTER_LED_ON;
3802 writeq(val64, &bar0->adapter_control);
3803 }
3804
3805 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3806}
3807
3808/**
3809 * s2io_ethtool_idnic - To physically identify the nic on the system.
3810 * @sp : private member of the device structure, which is a pointer to the
3811 * s2io_nic structure.
20346722 3812 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
3813 * ethtool.
3814 * Description: Used to physically identify the NIC on the system.
20346722 3815 * The Link LED will blink for a time specified by the user for
1da177e4 3816 * identification.
20346722 3817 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
3818 * identification is possible only if its link is up.
3819 * Return value:
3820 * int , returns 0 on success
3821 */
3822
3823static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3824{
3825 u64 val64 = 0, last_gpio_ctrl_val;
3826 nic_t *sp = dev->priv;
3827 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3828 u16 subid;
3829
3830 subid = sp->pdev->subsystem_device;
3831 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f
K
3832 if ((sp->device_type == XFRAME_I_DEVICE) &&
3833 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
3834 val64 = readq(&bar0->adapter_control);
3835 if (!(val64 & ADAPTER_CNTL_EN)) {
3836 printk(KERN_ERR
3837 "Adapter Link down, cannot blink LED\n");
3838 return -EFAULT;
3839 }
3840 }
3841 if (sp->id_timer.function == NULL) {
3842 init_timer(&sp->id_timer);
3843 sp->id_timer.function = s2io_phy_id;
3844 sp->id_timer.data = (unsigned long) sp;
3845 }
3846 mod_timer(&sp->id_timer, jiffies);
3847 if (data)
20346722 3848 msleep_interruptible(data * HZ);
1da177e4 3849 else
20346722 3850 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
3851 del_timer_sync(&sp->id_timer);
3852
541ae68f 3853 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
3854 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3855 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3856 }
3857
3858 return 0;
3859}
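/*
 * Usage note (illustrative, not from the original file): this routine backs
 * the ethtool .phys_id hook, so from userspace something like
 *
 *	ethtool -p eth0 5
 *
 * should blink the identification LED for roughly 5 seconds, assuming the
 * interface name used here and the ethtool binding set up later in this file.
 */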
3860
3861/**
3862 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722
K
3863 * @sp : private member of the device structure, which is a pointer to the
3864 * s2io_nic structure.
1da177e4
LT
3865 * @ep : pointer to the structure with pause parameters given by ethtool.
3866 * Description:
3867 * Returns the Pause frame generation and reception capability of the NIC.
3868 * Return value:
3869 * void
3870 */
3871static void s2io_ethtool_getpause_data(struct net_device *dev,
3872 struct ethtool_pauseparam *ep)
3873{
3874 u64 val64;
3875 nic_t *sp = dev->priv;
3876 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3877
3878 val64 = readq(&bar0->rmac_pause_cfg);
3879 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3880 ep->tx_pause = TRUE;
3881 if (val64 & RMAC_PAUSE_RX_ENABLE)
3882 ep->rx_pause = TRUE;
3883 ep->autoneg = FALSE;
3884}
3885
3886/**
3887 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 3888 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3889 * s2io_nic structure.
3890 * @ep : pointer to the structure with pause parameters given by ethtool.
3891 * Description:
3892 * It can be used to set or reset Pause frame generation or reception
3893 * support of the NIC.
3894 * Return value:
3895 * int, returns 0 on Success
3896 */
3897
3898static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 3899 struct ethtool_pauseparam *ep)
1da177e4
LT
3900{
3901 u64 val64;
3902 nic_t *sp = dev->priv;
3903 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3904
3905 val64 = readq(&bar0->rmac_pause_cfg);
3906 if (ep->tx_pause)
3907 val64 |= RMAC_PAUSE_GEN_ENABLE;
3908 else
3909 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3910 if (ep->rx_pause)
3911 val64 |= RMAC_PAUSE_RX_ENABLE;
3912 else
3913 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3914 writeq(val64, &bar0->rmac_pause_cfg);
3915 return 0;
3916}
3917
3918/**
3919 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 3920 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
3921 * s2io_nic structure.
3922 * @off : offset from which the data is to be read
3923 * @data : It's an output parameter where the data read at the given
20346722 3924 * offset is stored.
1da177e4 3925 * Description:
20346722 3926 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
3927 * read data.
3928 * NOTE: Will allow reading only the part of the EEPROM visible through the
3929 * I2C bus.
3930 * Return value:
3931 * -1 on failure and 0 on success.
3932 */
3933
3934#define S2IO_DEV_ID 5
3935static int read_eeprom(nic_t * sp, int off, u32 * data)
3936{
3937 int ret = -1;
3938 u32 exit_cnt = 0;
3939 u64 val64;
3940 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3941
3942 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3943 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3944 I2C_CONTROL_CNTL_START;
3945 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3946
3947 while (exit_cnt < 5) {
3948 val64 = readq(&bar0->i2c_control);
3949 if (I2C_CONTROL_CNTL_END(val64)) {
3950 *data = I2C_CONTROL_GET_DATA(val64);
3951 ret = 0;
3952 break;
3953 }
3954 msleep(50);
3955 exit_cnt++;
3956 }
3957
3958 return ret;
3959}
3960
3961/**
3962 * write_eeprom - actually writes the relevant part of the data value.
3963 * @sp : private member of the device structure, which is a pointer to the
3964 * s2io_nic structure.
3965 * @off : offset at which the data must be written
3966 * @data : The data that is to be written
20346722 3967 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
3968 * the Eeprom. (max of 3)
3969 * Description:
3970 * Actually writes the relevant part of the data value into the Eeprom
3971 * through the I2C bus.
3972 * Return value:
3973 * 0 on success, -1 on failure.
3974 */
3975
3976static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3977{
3978 int exit_cnt = 0, ret = -1;
3979 u64 val64;
3980 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3981
3982 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3983 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3984 I2C_CONTROL_CNTL_START;
3985 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3986
3987 while (exit_cnt < 5) {
3988 val64 = readq(&bar0->i2c_control);
3989 if (I2C_CONTROL_CNTL_END(val64)) {
3990 if (!(val64 & I2C_CONTROL_NACK))
3991 ret = 0;
3992 break;
3993 }
3994 msleep(50);
3995 exit_cnt++;
3996 }
3997
3998 return ret;
3999}
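/*
 * Illustrative usage sketch (assumption, not from the original file): a
 * caller reading one 32-bit word from the EEPROM and checking for failure
 * would look roughly like:
 *
 *	u32 word;
 *
 *	if (read_eeprom(sp, 0x10, &word))
 *		DBG_PRINT(ERR_DBG, "EEPROM read at offset 0x10 failed\n");
 *
 * The offset used here is arbitrary; only offsets visible through the I2C
 * bus can be accessed, as noted in the read_eeprom() description.
 */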
4000
4001/**
4002 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4003 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
20346722 4004 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4005 * containing all relevant information.
4006 * @data_buf : user defined value to be written into Eeprom.
4007 * Description: Reads the values stored in the Eeprom at given offset
4008 * for a given length. Stores these values into the input argument data
4009 * buffer 'data_buf' and returns these to the caller (ethtool.)
4010 * Return value:
4011 * int 0 on success
4012 */
4013
4014static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 4015 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4
LT
4016{
4017 u32 data, i, valid;
4018 nic_t *sp = dev->priv;
4019
4020 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4021
4022 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4023 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4024
4025 for (i = 0; i < eeprom->len; i += 4) {
4026 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4027 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4028 return -EFAULT;
4029 }
4030 valid = INV(data);
4031 memcpy((data_buf + i), &valid, 4);
4032 }
4033 return 0;
4034}
4035
4036/**
4037 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4038 * @sp : private member of the device structure, which is a pointer to the
4039 * s2io_nic structure.
20346722 4040 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4041 * containing all relevant information.
4042 * @data_buf : user-defined value to be written into the Eeprom.
4043 * Description:
4044 * Tries to write the user provided value in the Eeprom, at the offset
4045 * given by the user.
4046 * Return value:
4047 * 0 on success, -EFAULT on failure.
4048 */
4049
4050static int s2io_ethtool_seeprom(struct net_device *dev,
4051 struct ethtool_eeprom *eeprom,
4052 u8 * data_buf)
4053{
4054 int len = eeprom->len, cnt = 0;
4055 u32 valid = 0, data;
4056 nic_t *sp = dev->priv;
4057
4058 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4059 DBG_PRINT(ERR_DBG,
4060 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4061 DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
4062 eeprom->magic);
4063 return -EFAULT;
4064 }
4065
4066 while (len) {
4067 data = (u32) data_buf[cnt] & 0x000000FF;
4068 if (data) {
4069 valid = (u32) (data << 24);
4070 } else
4071 valid = data;
4072
4073 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4074 DBG_PRINT(ERR_DBG,
4075 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4076 DBG_PRINT(ERR_DBG,
4077 "write into the specified offset\n");
4078 return -EFAULT;
4079 }
4080 cnt++;
4081 len--;
4082 }
4083
4084 return 0;
4085}
4086
4087/**
20346722
K
4088 * s2io_register_test - reads and writes into all clock domains.
4089 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4090 * s2io_nic structure.
4091 * @data : variable that returns the result of each of the tests conducted
4092 * by the driver.
4093 * Description:
4094 * Read and write into all clock domains. The NIC has 3 clock domains,
4095 * so verify that registers in all three regions are accessible.
4096 * Return value:
4097 * 0 on success.
4098 */
4099
4100static int s2io_register_test(nic_t * sp, uint64_t * data)
4101{
4102 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4103 u64 val64 = 0;
4104 int fail = 0;
4105
20346722
K
4106 val64 = readq(&bar0->pif_rd_swapper_fb);
4107 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
4108 fail = 1;
4109 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4110 }
4111
4112 val64 = readq(&bar0->rmac_pause_cfg);
4113 if (val64 != 0xc000ffff00000000ULL) {
4114 fail = 1;
4115 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4116 }
4117
4118 val64 = readq(&bar0->rx_queue_cfg);
4119 if (val64 != 0x0808080808080808ULL) {
4120 fail = 1;
4121 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4122 }
4123
4124 val64 = readq(&bar0->xgxs_efifo_cfg);
4125 if (val64 != 0x000000001923141EULL) {
4126 fail = 1;
4127 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4128 }
4129
4130 val64 = 0x5A5A5A5A5A5A5A5AULL;
4131 writeq(val64, &bar0->xmsi_data);
4132 val64 = readq(&bar0->xmsi_data);
4133 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4134 fail = 1;
4135 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4136 }
4137
4138 val64 = 0xA5A5A5A5A5A5A5A5ULL;
4139 writeq(val64, &bar0->xmsi_data);
4140 val64 = readq(&bar0->xmsi_data);
4141 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4142 fail = 1;
4143 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4144 }
4145
4146 *data = fail;
4147 return 0;
4148}
4149
4150/**
20346722 4151 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
1da177e4
LT
4152 * @sp : private member of the device structure, which is a pointer to the
4153 * s2io_nic structure.
4154 * @data:variable that returns the result of each of the test conducted by
4155 * the driver.
4156 * Description:
20346722 4157 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
1da177e4
LT
4158 * register.
4159 * Return value:
4160 * 0 on success.
4161 */
4162
4163static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4164{
4165 int fail = 0;
4166 u32 ret_data;
4167
4168 /* Test Write Error at offset 0 */
4169 if (!write_eeprom(sp, 0, 0, 3))
4170 fail = 1;
4171
4172 /* Test Write at offset 4f0 */
4173 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4174 fail = 1;
4175 if (read_eeprom(sp, 0x4F0, &ret_data))
4176 fail = 1;
4177
4178 if (ret_data != 0x01234567)
4179 fail = 1;
4180
4181 /* Reset the EEPROM data back to 0xFFFFFFFF */
4182 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4183
4184 /* Test Write Request Error at offset 0x7c */
4185 if (!write_eeprom(sp, 0x07C, 0, 3))
4186 fail = 1;
4187
4188 /* Test Write Request at offset 0x7fc */
4189 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4190 fail = 1;
4191 if (read_eeprom(sp, 0x7FC, &ret_data))
4192 fail = 1;
4193
4194 if (ret_data != 0x01234567)
4195 fail = 1;
4196
4198 /* Reset the EEPROM data back to 0xFFFFFFFF */
4198 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4199
4200 /* Test Write Error at offset 0x80 */
4201 if (!write_eeprom(sp, 0x080, 0, 3))
4202 fail = 1;
4203
4204 /* Test Write Error at offset 0xfc */
4205 if (!write_eeprom(sp, 0x0FC, 0, 3))
4206 fail = 1;
4207
4208 /* Test Write Error at offset 0x100 */
4209 if (!write_eeprom(sp, 0x100, 0, 3))
4210 fail = 1;
4211
4212 /* Test Write Error at offset 4ec */
4213 if (!write_eeprom(sp, 0x4EC, 0, 3))
4214 fail = 1;
4215
4216 *data = fail;
4217 return 0;
4218}
4219
4220/**
4221 * s2io_bist_test - invokes the MemBist test of the card.
20346722 4222 * @sp : private member of the device structure, which is a pointer to the
1da177e4 4223 * s2io_nic structure.
20346722 4224 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
4225 * the driver.
4226 * Description:
4227 * This invokes the MemBist test of the card. We give around
4228 * 2 secs time for the Test to complete. If it's still not complete
20346722 4229 * within this period, we consider that the test failed.
1da177e4
LT
4230 * Return value:
4231 * 0 on success and -1 on failure.
4232 */
4233
4234static int s2io_bist_test(nic_t * sp, uint64_t * data)
4235{
4236 u8 bist = 0;
4237 int cnt = 0, ret = -1;
4238
4239 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4240 bist |= PCI_BIST_START;
4241 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
4242
4243 while (cnt < 20) {
4244 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4245 if (!(bist & PCI_BIST_START)) {
4246 *data = (bist & PCI_BIST_CODE_MASK);
4247 ret = 0;
4248 break;
4249 }
4250 msleep(100);
4251 cnt++;
4252 }
4253
4254 return ret;
4255}
4256
4257/**
20346722
K
4258 * s2io_link_test - verifies the link state of the nic
4259 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4260 * s2io_nic structure.
4261 * @data: variable that returns the result of each of the test conducted by
4262 * the driver.
4263 * Description:
20346722 4264 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
4265 * argument 'data' appropriately.
4266 * Return value:
4267 * 0 on success.
4268 */
4269
4270static int s2io_link_test(nic_t * sp, uint64_t * data)
4271{
4272 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4273 u64 val64;
4274
4275 val64 = readq(&bar0->adapter_status);
4276 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4277 *data = 1;
4278
4279 return 0;
4280}
4281
4282/**
20346722
K
4283 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4284 * @sp - private member of the device structure, which is a pointer to the
1da177e4 4285 * s2io_nic structure.
20346722 4286 * @data - variable that returns the result of each of the test
1da177e4
LT
4287 * conducted by the driver.
4288 * Description:
20346722 4289 * This is one of the offline test that tests the read and write
1da177e4
LT
4290 * access to the RldRam chip on the NIC.
4291 * Return value:
4292 * 0 on success.
4293 */
4294
4295static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4296{
4297 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4298 u64 val64;
4299 int cnt, iteration = 0, test_pass = 0;
4300
4301 val64 = readq(&bar0->adapter_control);
4302 val64 &= ~ADAPTER_ECC_EN;
4303 writeq(val64, &bar0->adapter_control);
4304
4305 val64 = readq(&bar0->mc_rldram_test_ctrl);
4306 val64 |= MC_RLDRAM_TEST_MODE;
4307 writeq(val64, &bar0->mc_rldram_test_ctrl);
4308
4309 val64 = readq(&bar0->mc_rldram_mrs);
4310 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4311 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4312
4313 val64 |= MC_RLDRAM_MRS_ENABLE;
4314 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4315
4316 while (iteration < 2) {
4317 val64 = 0x55555555aaaa0000ULL;
4318 if (iteration == 1) {
4319 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4320 }
4321 writeq(val64, &bar0->mc_rldram_test_d0);
4322
4323 val64 = 0xaaaa5a5555550000ULL;
4324 if (iteration == 1) {
4325 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4326 }
4327 writeq(val64, &bar0->mc_rldram_test_d1);
4328
4329 val64 = 0x55aaaaaaaa5a0000ULL;
4330 if (iteration == 1) {
4331 val64 ^= 0xFFFFFFFFFFFF0000ULL;
4332 }
4333 writeq(val64, &bar0->mc_rldram_test_d2);
4334
4335 val64 = (u64) (0x0000003fffff0000ULL);
4336 writeq(val64, &bar0->mc_rldram_test_add);
4337
4338
4339 val64 = MC_RLDRAM_TEST_MODE;
4340 writeq(val64, &bar0->mc_rldram_test_ctrl);
4341
4342 val64 |=
4343 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4344 MC_RLDRAM_TEST_GO;
4345 writeq(val64, &bar0->mc_rldram_test_ctrl);
4346
4347 for (cnt = 0; cnt < 5; cnt++) {
4348 val64 = readq(&bar0->mc_rldram_test_ctrl);
4349 if (val64 & MC_RLDRAM_TEST_DONE)
4350 break;
4351 msleep(200);
4352 }
4353
4354 if (cnt == 5)
4355 break;
4356
4357 val64 = MC_RLDRAM_TEST_MODE;
4358 writeq(val64, &bar0->mc_rldram_test_ctrl);
4359
4360 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4361 writeq(val64, &bar0->mc_rldram_test_ctrl);
4362
4363 for (cnt = 0; cnt < 5; cnt++) {
4364 val64 = readq(&bar0->mc_rldram_test_ctrl);
4365 if (val64 & MC_RLDRAM_TEST_DONE)
4366 break;
4367 msleep(500);
4368 }
4369
4370 if (cnt == 5)
4371 break;
4372
4373 val64 = readq(&bar0->mc_rldram_test_ctrl);
4374 if (val64 & MC_RLDRAM_TEST_PASS)
4375 test_pass = 1;
4376
4377 iteration++;
4378 }
4379
4380 if (!test_pass)
4381 *data = 1;
4382 else
4383 *data = 0;
4384
4385 return 0;
4386}
4387
4388/**
4389 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
4390 * @sp : private member of the device structure, which is a pointer to the
4391 * s2io_nic structure.
4392 * @ethtest : pointer to an ethtool command specific structure that will be
4393 * returned to the user.
20346722 4394 * @data : variable that returns the result of each of the test
1da177e4
LT
4395 * conducted by the driver.
4396 * Description:
4397 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4398 * the health of the card.
4399 * Return value:
4400 * void
4401 */
4402
4403static void s2io_ethtool_test(struct net_device *dev,
4404 struct ethtool_test *ethtest,
4405 uint64_t * data)
4406{
4407 nic_t *sp = dev->priv;
4408 int orig_state = netif_running(sp->dev);
4409
4410 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4411 /* Offline Tests. */
20346722 4412 if (orig_state)
1da177e4 4413 s2io_close(sp->dev);
1da177e4
LT
4414
4415 if (s2io_register_test(sp, &data[0]))
4416 ethtest->flags |= ETH_TEST_FL_FAILED;
4417
4418 s2io_reset(sp);
1da177e4
LT
4419
4420 if (s2io_rldram_test(sp, &data[3]))
4421 ethtest->flags |= ETH_TEST_FL_FAILED;
4422
4423 s2io_reset(sp);
1da177e4
LT
4424
4425 if (s2io_eeprom_test(sp, &data[1]))
4426 ethtest->flags |= ETH_TEST_FL_FAILED;
4427
4428 if (s2io_bist_test(sp, &data[4]))
4429 ethtest->flags |= ETH_TEST_FL_FAILED;
4430
4431 if (orig_state)
4432 s2io_open(sp->dev);
4433
4434 data[2] = 0;
4435 } else {
4436 /* Online Tests. */
4437 if (!orig_state) {
4438 DBG_PRINT(ERR_DBG,
4439 "%s: is not up, cannot run test\n",
4440 dev->name);
4441 data[0] = -1;
4442 data[1] = -1;
4443 data[2] = -1;
4444 data[3] = -1;
4445 data[4] = -1;
4446 }
4447
4448 if (s2io_link_test(sp, &data[2]))
4449 ethtest->flags |= ETH_TEST_FL_FAILED;
4450
4451 data[0] = 0;
4452 data[1] = 0;
4453 data[3] = 0;
4454 data[4] = 0;
4455 }
4456}
4457
4458static void s2io_get_ethtool_stats(struct net_device *dev,
4459 struct ethtool_stats *estats,
4460 u64 * tmp_stats)
4461{
4462 int i = 0;
4463 nic_t *sp = dev->priv;
4464 StatInfo_t *stat_info = sp->mac_control.stats_info;
4465
7ba013ac 4466 s2io_updt_stats(sp);
541ae68f
K
4467 tmp_stats[i++] =
4468 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4469 le32_to_cpu(stat_info->tmac_frms);
4470 tmp_stats[i++] =
4471 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4472 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 4473 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f
K
4474 tmp_stats[i++] =
4475 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4476 le32_to_cpu(stat_info->tmac_mcst_frms);
4477 tmp_stats[i++] =
4478 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4479 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 4480 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
541ae68f
K
4481 tmp_stats[i++] =
4482 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4483 le32_to_cpu(stat_info->tmac_any_err_frms);
1da177e4 4484 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f
K
4485 tmp_stats[i++] =
4486 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4487 le32_to_cpu(stat_info->tmac_vld_ip);
4488 tmp_stats[i++] =
4489 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4490 le32_to_cpu(stat_info->tmac_drop_ip);
4491 tmp_stats[i++] =
4492 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4493 le32_to_cpu(stat_info->tmac_icmp);
4494 tmp_stats[i++] =
4495 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4496 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 4497 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f
K
4498 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4499 le32_to_cpu(stat_info->tmac_udp);
4500 tmp_stats[i++] =
4501 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4502 le32_to_cpu(stat_info->rmac_vld_frms);
4503 tmp_stats[i++] =
4504 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4505 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
4506 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4507 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f
K
4508 tmp_stats[i++] =
4509 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4510 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4511 tmp_stats[i++] =
4512 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4513 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4
LT
4514 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4515 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4516 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
541ae68f
K
4517 tmp_stats[i++] =
4518 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4519 le32_to_cpu(stat_info->rmac_discarded_frms);
4520 tmp_stats[i++] =
4521 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4522 le32_to_cpu(stat_info->rmac_usized_frms);
4523 tmp_stats[i++] =
4524 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4525 le32_to_cpu(stat_info->rmac_osized_frms);
4526 tmp_stats[i++] =
4527 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4528 le32_to_cpu(stat_info->rmac_frag_frms);
4529 tmp_stats[i++] =
4530 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4531 le32_to_cpu(stat_info->rmac_jabber_frms);
4532 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4533 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
4534 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4535 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
541ae68f
K
4536 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4537 le32_to_cpu(stat_info->rmac_drop_ip);
4538 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4539 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 4540 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
541ae68f
K
4541 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4542 le32_to_cpu(stat_info->rmac_udp);
4543 tmp_stats[i++] =
4544 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4545 le32_to_cpu(stat_info->rmac_err_drp_udp);
4546 tmp_stats[i++] =
4547 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4548 le32_to_cpu(stat_info->rmac_pause_cnt);
4549 tmp_stats[i++] =
4550 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4551 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 4552 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
7ba013ac
K
4553 tmp_stats[i++] = 0;
4554 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4555 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
1da177e4
LT
4556}
4557
20346722 4558int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
4559{
4560 return (XENA_REG_SPACE);
4561}
4562
4563
20346722 4564u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4
LT
4565{
4566 nic_t *sp = dev->priv;
4567
4568 return (sp->rx_csum);
4569}
20346722 4570int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4571{
4572 nic_t *sp = dev->priv;
4573
4574 if (data)
4575 sp->rx_csum = 1;
4576 else
4577 sp->rx_csum = 0;
4578
4579 return 0;
4580}
20346722 4581int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
4582{
4583 return (XENA_EEPROM_SPACE);
4584}
4585
20346722 4586int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
4587{
4588 return (S2IO_TEST_LEN);
4589}
20346722
K
4590void s2io_ethtool_get_strings(struct net_device *dev,
4591 u32 stringset, u8 * data)
1da177e4
LT
4592{
4593 switch (stringset) {
4594 case ETH_SS_TEST:
4595 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4596 break;
4597 case ETH_SS_STATS:
4598 memcpy(data, &ethtool_stats_keys,
4599 sizeof(ethtool_stats_keys));
4600 }
4601}
1da177e4
LT
4602static int s2io_ethtool_get_stats_count(struct net_device *dev)
4603{
4604 return (S2IO_STAT_LEN);
4605}
4606
20346722 4607int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
4608{
4609 if (data)
4610 dev->features |= NETIF_F_IP_CSUM;
4611 else
4612 dev->features &= ~NETIF_F_IP_CSUM;
4613
4614 return 0;
4615}
4616
4617
4618static struct ethtool_ops netdev_ethtool_ops = {
4619 .get_settings = s2io_ethtool_gset,
4620 .set_settings = s2io_ethtool_sset,
4621 .get_drvinfo = s2io_ethtool_gdrvinfo,
4622 .get_regs_len = s2io_ethtool_get_regs_len,
4623 .get_regs = s2io_ethtool_gregs,
4624 .get_link = ethtool_op_get_link,
4625 .get_eeprom_len = s2io_get_eeprom_len,
4626 .get_eeprom = s2io_ethtool_geeprom,
4627 .set_eeprom = s2io_ethtool_seeprom,
4628 .get_pauseparam = s2io_ethtool_getpause_data,
4629 .set_pauseparam = s2io_ethtool_setpause_data,
4630 .get_rx_csum = s2io_ethtool_get_rx_csum,
4631 .set_rx_csum = s2io_ethtool_set_rx_csum,
4632 .get_tx_csum = ethtool_op_get_tx_csum,
4633 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4634 .get_sg = ethtool_op_get_sg,
4635 .set_sg = ethtool_op_set_sg,
4636#ifdef NETIF_F_TSO
4637 .get_tso = ethtool_op_get_tso,
4638 .set_tso = ethtool_op_set_tso,
4639#endif
4640 .self_test_count = s2io_ethtool_self_test_count,
4641 .self_test = s2io_ethtool_test,
4642 .get_strings = s2io_ethtool_get_strings,
4643 .phys_id = s2io_ethtool_idnic,
4644 .get_stats_count = s2io_ethtool_get_stats_count,
4645 .get_ethtool_stats = s2io_get_ethtool_stats
4646};
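/*
 * Note (assumption, for illustration only): this ops table is expected to be
 * attached to the net_device in the probe path, e.g.
 *
 *	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 *
 * after which the ethtool ioctls are routed to the handlers defined above.
 */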
4647
4648/**
20346722 4649 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
4650 * @dev : Device pointer.
4651 * @ifr : An IOCTL-specific structure that can contain a pointer to
4652 * a proprietary structure used to pass information to the driver.
4653 * @cmd : This is used to distinguish between the different commands that
4654 * can be passed to the IOCTL functions.
4655 * Description:
20346722
K
4656 * Currently there is no special functionality supported in IOCTL, hence
4657 * the function always returns -EOPNOTSUPP.
1da177e4
LT
4658 */
4659
20346722 4660int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
4661{
4662 return -EOPNOTSUPP;
4663}
4664
4665/**
4666 * s2io_change_mtu - entry point to change MTU size for the device.
4667 * @dev : device pointer.
4668 * @new_mtu : the new MTU size for the device.
4669 * Description: A driver entry point to change MTU size for the device.
4670 * If the interface is running, it is brought down and back up with the new MTU.
4671 * Return value:
4672 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4673 * file on failure.
4674 */
4675
20346722 4676int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4
LT
4677{
4678 nic_t *sp = dev->priv;
1da177e4
LT
4679
4680 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4681 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4682 dev->name);
4683 return -EPERM;
4684 }
4685
1da177e4 4686 dev->mtu = new_mtu;
d8892c6e
K
4687 if (netif_running(dev)) {
4688 s2io_card_down(sp);
4689 netif_stop_queue(dev);
4690 if (s2io_card_up(sp)) {
4691 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4692 __FUNCTION__);
4693 }
4694 if (netif_queue_stopped(dev))
4695 netif_wake_queue(dev);
4696 } else { /* Device is down */
4697 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4698 u64 val64 = new_mtu;
4699
4700 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4701 }
1da177e4
LT
4702
4703 return 0;
4704}
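/*
 * Note (assumption, for illustration only): this routine is reached through
 * the dev->change_mtu hook, typically set in the probe path as
 *
 *	dev->change_mtu = &s2io_change_mtu;
 *
 * so that, for example, "ifconfig eth0 mtu 9000" brings a running interface
 * down and back up with the new MTU, as implemented above.
 */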
4705
4706/**
4707 * s2io_tasklet - Bottom half of the ISR.
4708 * @dev_addr : address of the net device structure, passed as an unsigned long.
4709 * Description:
4710 * This is the tasklet or the bottom half of the ISR. This is
20346722 4711 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 4712 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 4713 * be pushed into the tasklet. For now the tasklet is used only to
1da177e4
LT
4714 * replenish the Rx buffers in the Rx buffer descriptors.
4715 * Return value:
4716 * void.
4717 */
4718
4719static void s2io_tasklet(unsigned long dev_addr)
4720{
4721 struct net_device *dev = (struct net_device *) dev_addr;
4722 nic_t *sp = dev->priv;
4723 int i, ret;
4724 mac_info_t *mac_control;
4725 struct config_param *config;
4726
4727 mac_control = &sp->mac_control;
4728 config = &sp->config;
4729
4730 if (!TASKLET_IN_USE) {
4731 for (i = 0; i < config->rx_ring_num; i++) {
4732 ret = fill_rx_buffers(sp, i);
4733 if (ret == -ENOMEM) {
4734 DBG_PRINT(ERR_DBG, "%s: Out of ",
4735 dev->name);
4736 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4737 break;
4738 } else if (ret == -EFILL) {
4739 DBG_PRINT(ERR_DBG,
4740 "%s: Rx Ring %d is full\n",
4741 dev->name, i);
4742 break;
4743 }
4744 }
4745 clear_bit(0, (&sp->tasklet_status));
4746 }
4747}
4748
4749/**
4750 * s2io_set_link - Set the Link status
4751 * @data: long pointer to the device private structure
4752 * Description: Sets the link status for the adapter
4753 */
4754
4755static void s2io_set_link(unsigned long data)
4756{
4757 nic_t *nic = (nic_t *) data;
4758 struct net_device *dev = nic->dev;
4759 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4760 register u64 val64;
4761 u16 subid;
4762
4763 if (test_and_set_bit(0, &(nic->link_state))) {
4764 /* The card is being reset, no point doing anything */
4765 return;
4766 }
4767
4768 subid = nic->pdev->subsystem_device;
a371a07d
K
4769 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4770 /*
4771 * Allow a small delay for the NICs self initiated
4772 * cleanup to complete.
4773 */
4774 msleep(100);
4775 }
1da177e4
LT
4776
4777 val64 = readq(&bar0->adapter_status);
20346722 4778 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1da177e4
LT
4779 if (LINK_IS_UP(val64)) {
4780 val64 = readq(&bar0->adapter_control);
4781 val64 |= ADAPTER_CNTL_EN;
4782 writeq(val64, &bar0->adapter_control);
541ae68f
K
4783 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4784 subid)) {
1da177e4
LT
4785 val64 = readq(&bar0->gpio_control);
4786 val64 |= GPIO_CTRL_GPIO_0;
4787 writeq(val64, &bar0->gpio_control);
4788 val64 = readq(&bar0->gpio_control);
4789 } else {
4790 val64 |= ADAPTER_LED_ON;
4791 writeq(val64, &bar0->adapter_control);
4792 }
a371a07d
K
4793 if (s2io_link_fault_indication(nic) ==
4794 MAC_RMAC_ERR_TIMER) {
4795 val64 = readq(&bar0->adapter_status);
4796 if (!LINK_IS_UP(val64)) {
4797 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4798 DBG_PRINT(ERR_DBG, " Link down");
4799 DBG_PRINT(ERR_DBG, "after ");
4800 DBG_PRINT(ERR_DBG, "enabling ");
4801 DBG_PRINT(ERR_DBG, "device \n");
4802 }
1da177e4
LT
4803 }
4804 if (nic->device_enabled_once == FALSE) {
4805 nic->device_enabled_once = TRUE;
4806 }
4807 s2io_link(nic, LINK_UP);
4808 } else {
541ae68f
K
4809 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4810 subid)) {
1da177e4
LT
4811 val64 = readq(&bar0->gpio_control);
4812 val64 &= ~GPIO_CTRL_GPIO_0;
4813 writeq(val64, &bar0->gpio_control);
4814 val64 = readq(&bar0->gpio_control);
4815 }
4816 s2io_link(nic, LINK_DOWN);
4817 }
4818 } else { /* NIC is not Quiescent. */
4819 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4820 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4821 netif_stop_queue(dev);
4822 }
4823 clear_bit(0, &(nic->link_state));
4824}
4825
4826static void s2io_card_down(nic_t * sp)
4827{
4828 int cnt = 0;
4829 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4830 unsigned long flags;
4831 register u64 val64 = 0;
4832
25fff88e 4833 del_timer_sync(&sp->alarm_timer);
1da177e4 4834 /* If s2io_set_link task is executing, wait till it completes. */
20346722 4835 while (test_and_set_bit(0, &(sp->link_state))) {
1da177e4 4836 msleep(50);
20346722 4837 }
1da177e4
LT
4838 atomic_set(&sp->card_state, CARD_DOWN);
4839
4840 /* disable Tx and Rx traffic on the NIC */
4841 stop_nic(sp);
4842
4843 /* Kill tasklet. */
4844 tasklet_kill(&sp->task);
4845
4846 /* Check if the device is Quiescent and then Reset the NIC */
4847 do {
4848 val64 = readq(&bar0->adapter_status);
20346722 4849 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
1da177e4
LT
4850 break;
4851 }
4852
4853 msleep(50);
4854 cnt++;
4855 if (cnt == 10) {
4856 DBG_PRINT(ERR_DBG,
4857 "s2io_close:Device not Quiescent ");
4858 DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4859 (unsigned long long) val64);
4860 break;
4861 }
4862 } while (1);
1da177e4
LT
4863 s2io_reset(sp);
4864
7ba013ac
K
4865 /* Waiting till all Interrupt handlers are complete */
4866 cnt = 0;
4867 do {
4868 msleep(10);
4869 if (!atomic_read(&sp->isr_cnt))
4870 break;
4871 cnt++;
4872 } while(cnt < 5);
4873
4874 spin_lock_irqsave(&sp->tx_lock, flags);
4875 /* Free all Tx buffers */
1da177e4 4876 free_tx_buffers(sp);
7ba013ac
K
4877 spin_unlock_irqrestore(&sp->tx_lock, flags);
4878
4879 /* Free all Rx buffers */
4880 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 4881 free_rx_buffers(sp);
7ba013ac 4882 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 4883
1da177e4
LT
4884 clear_bit(0, &(sp->link_state));
4885}
4886
4887static int s2io_card_up(nic_t * sp)
4888{
4889 int i, ret;
4890 mac_info_t *mac_control;
4891 struct config_param *config;
4892 struct net_device *dev = (struct net_device *) sp->dev;
4893
4894 /* Initialize the H/W I/O registers */
4895 if (init_nic(sp) != 0) {
4896 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4897 dev->name);
4898 return -ENODEV;
4899 }
4900
20346722
K
4901 /*
4902 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
4903 * Rx ring and initializing buffers into 30 Rx blocks
4904 */
4905 mac_control = &sp->mac_control;
4906 config = &sp->config;
4907
4908 for (i = 0; i < config->rx_ring_num; i++) {
4909 if ((ret = fill_rx_buffers(sp, i))) {
4910 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4911 dev->name);
4912 s2io_reset(sp);
4913 free_rx_buffers(sp);
4914 return -ENOMEM;
4915 }
4916 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4917 atomic_read(&sp->rx_bufs_left[i]));
4918 }
4919
4920 /* Setting its receive mode */
4921 s2io_set_multicast(dev);
4922
4923 /* Enable tasklet for the device */
4924 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4925
4926 /* Enable Rx Traffic and interrupts on the NIC */
4927 if (start_nic(sp)) {
4928 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4929 tasklet_kill(&sp->task);
4930 s2io_reset(sp);
4931 free_irq(dev->irq, dev);
4932 free_rx_buffers(sp);
4933 return -ENODEV;
4934 }
4935
25fff88e
K
4936 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4937
1da177e4
LT
4938 atomic_set(&sp->card_state, CARD_UP);
4939 return 0;
4940}
4941
20346722 4942/**
1da177e4
LT
4943 * s2io_restart_nic - Resets the NIC.
4944 * @data : long pointer to the device private structure
4945 * Description:
4946 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 4947 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
4948 * the run time of the watch dog routine which is run holding a
4949 * spin lock.
4950 */
4951
4952static void s2io_restart_nic(unsigned long data)
4953{
4954 struct net_device *dev = (struct net_device *) data;
4955 nic_t *sp = dev->priv;
4956
4957 s2io_card_down(sp);
4958 if (s2io_card_up(sp)) {
4959 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4960 dev->name);
4961 }
4962 netif_wake_queue(dev);
4963 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4964 dev->name);
20346722 4965
1da177e4
LT
4966}
4967
20346722
K
4968/**
4969 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
4970 * @dev : Pointer to net device structure
4971 * Description:
4972 * This function is triggered if the Tx Queue is stopped
4973 * for a pre-defined amount of time when the Interface is still up.
4974 * If the Interface is jammed in such a situation, the hardware is
4975 * reset (by s2io_close) and restarted again (by s2io_open) to
4976 * overcome any problem that might have been caused in the hardware.
4977 * Return value:
4978 * void
4979 */
4980
4981static void s2io_tx_watchdog(struct net_device *dev)
4982{
4983 nic_t *sp = dev->priv;
4984
4985 if (netif_carrier_ok(dev)) {
4986 schedule_work(&sp->rst_timer_task);
4987 }
4988}
4989
4990/**
4991 * rx_osm_handler - To perform some OS related operations on SKB.
4992 * @sp: private member of the device structure, pointer to s2io_nic structure.
4993 * @skb : the socket buffer pointer.
4994 * @len : length of the packet
4995 * @cksum : FCS checksum of the frame.
4996 * @ring_no : the ring from which this RxD was extracted.
20346722 4997 * Description:
1da177e4
LT
4998 * This function is called by the Rx interrupt service routine to perform
4999 * some OS related operations on the SKB before passing it to the upper
5000 * layers. It mainly checks if the checksum is OK, if so adds it to the
5001 * SKBs cksum variable, increments the Rx packet count and passes the SKB
5002 * to the upper layer. If the checksum is wrong, it increments the Rx
5003 * packet error count, frees the SKB and returns error.
5004 * Return value:
5005 * SUCCESS on success and -1 on failure.
5006 */
20346722 5007static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
1da177e4 5008{
20346722 5009 nic_t *sp = ring_data->nic;
1da177e4 5010 struct net_device *dev = (struct net_device *) sp->dev;
20346722
K
5011 struct sk_buff *skb = (struct sk_buff *)
5012 ((unsigned long) rxdp->Host_Control);
5013 int ring_no = ring_data->ring_no;
1da177e4
LT
5014 u16 l3_csum, l4_csum;
5015#ifdef CONFIG_2BUFF_MODE
20346722
K
5016 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5017 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5018 int get_block = ring_data->rx_curr_get_info.block_index;
5019 int get_off = ring_data->rx_curr_get_info.offset;
5020 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
1da177e4 5021 unsigned char *buff;
20346722
K
5022#else
5023 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
1da177e4 5024#endif
20346722
K
5025 skb->dev = dev;
5026 if (rxdp->Control_1 & RXD_T_CODE) {
5027 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5028 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5029 dev->name, err);
1ddc50d4
K
5030 dev_kfree_skb(skb);
5031 sp->stats.rx_crc_errors++;
5032 atomic_dec(&sp->rx_bufs_left[ring_no]);
5033 rxdp->Host_Control = 0;
5034 return 0;
20346722 5035 }
1da177e4 5036
20346722
K
5037 /* Updating statistics */
5038 rxdp->Host_Control = 0;
5039 sp->rx_pkt_count++;
5040 sp->stats.rx_packets++;
5041#ifndef CONFIG_2BUFF_MODE
5042 sp->stats.rx_bytes += len;
5043#else
5044 sp->stats.rx_bytes += buf0_len + buf2_len;
5045#endif
5046
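	/*
	 * In single-buffer mode the whole frame length is claimed with
	 * skb_put().  In 2-buffer mode the header portion staged in
	 * ba->ba_0 is copied into headroom obtained with skb_push(), and
	 * the buffer-2 payload length is then claimed with skb_put().
	 */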
5047#ifndef CONFIG_2BUFF_MODE
5048 skb_put(skb, len);
5049#else
5050 buff = skb_push(skb, buf0_len);
5051 memcpy(buff, ba->ba_0, buf0_len);
5052 skb_put(skb, buf2_len);
5053#endif
5054
5055 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5056 (sp->rx_csum)) {
5057 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
5058 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5059 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 5060 /*
1da177e4
LT
5061 * NIC verifies if the Checksum of the received
5062 * frame is Ok or not and accordingly returns
5063 * a flag in the RxD.
5064 */
5065 skb->ip_summed = CHECKSUM_UNNECESSARY;
5066 } else {
20346722
K
5067 /*
5068 * Packet with erroneous checksum, let the
1da177e4
LT
5069 * upper layers deal with it.
5070 */
5071 skb->ip_summed = CHECKSUM_NONE;
5072 }
5073 } else {
5074 skb->ip_summed = CHECKSUM_NONE;
5075 }
5076
1da177e4 5077 skb->protocol = eth_type_trans(skb, dev);
1da177e4 5078#ifdef CONFIG_S2IO_NAPI
be3a6b02
K
5079 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5080 /* Queueing the vlan frame to the upper layer */
5081 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5082 RXD_GET_VLAN_TAG(rxdp->Control_2));
5083 } else {
5084 netif_receive_skb(skb);
5085 }
1da177e4 5086#else
be3a6b02
K
5087 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5088 /* Queueing the vlan frame to the upper layer */
5089 vlan_hwaccel_rx(skb, sp->vlgrp,
5090 RXD_GET_VLAN_TAG(rxdp->Control_2));
5091 } else {
5092 netif_rx(skb);
5093 }
1da177e4 5094#endif
1da177e4 5095 dev->last_rx = jiffies;
1da177e4 5096 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
5097 return SUCCESS;
5098}
5099
5100/**
5101 * s2io_link - stops/starts the Tx queue.
5102 * @sp : private member of the device structure, which is a pointer to the
5103 * s2io_nic structure.
5104 * @link : indicates whether link is UP/DOWN.
5105 * Description:
5106 * This function stops/starts the Tx queue depending on whether the link
20346722
K
5107 * status of the NIC is down or up. This is called by the Alarm
5108 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
5109 * Return value:
5110 * void.
5111 */
5112
20346722 5113void s2io_link(nic_t * sp, int link)
1da177e4
LT
5114{
5115 struct net_device *dev = (struct net_device *) sp->dev;
5116
5117 if (link != sp->last_link_state) {
5118 if (link == LINK_DOWN) {
5119 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5120 netif_carrier_off(dev);
5121 } else {
5122 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5123 netif_carrier_on(dev);
5124 }
5125 }
5126 sp->last_link_state = link;
5127}
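/*
 * Editor's note (illustrative only, not in the original source): a caller
 * in the alarm/link-change path would typically invoke
 *
 *	s2io_link(sp, LINK_DOWN);
 *
 * and the function itself suppresses repeated notifications by comparing
 * against sp->last_link_state.
 */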
5128
5129/**
5130 * get_xena_rev_id - to identify revision ID of xena.
5131 * @pdev : PCI Dev structure
5132 * Description:
5133 * Function to identify the Revision ID of xena.
5134 * Return value:
5135 * returns the revision ID of the device.
5136 */
5137
5138int get_xena_rev_id(struct pci_dev *pdev)
5139{
5140 u8 id = 0;
5141 int ret;
5142 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5143 return id;
5144}
5145
5146/**
5147 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
5148 * @sp : private member of the device structure, which is a pointer to the
5149 * s2io_nic structure.
5150 * Description:
5151 * This function initializes a few of the PCI and PCI-X configuration registers
5152 * with recommended values.
5153 * Return value:
5154 * void
5155 */
5156
5157static void s2io_init_pci(nic_t * sp)
5158{
20346722 5159 u16 pci_cmd = 0, pcix_cmd = 0;
5160
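	/*
	 * Editor's note (not in the original source): in the PCI-X command
	 * register, bit 0 is Data Parity Error Recovery Enable and bit 1 is
	 * Enable Relaxed Ordering; the writes below set bit 0 and clear
	 * bit 1 (mask 0xfffd).
	 */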
5161 /* Enable Data Parity Error Recovery in PCI-X command register. */
5162 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5163 &(pcix_cmd));
1da177e4 5164 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5165 (pcix_cmd | 1));
1da177e4 5166 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5167 &(pcix_cmd));
5168
5169 /* Set the PErr Response bit in PCI command register. */
5170 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5171 pci_write_config_word(sp->pdev, PCI_COMMAND,
5172 (pci_cmd | PCI_COMMAND_PARITY));
5173 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5174
1da177e4 5175 /* Forcibly disabling relaxed ordering capability of the card. */
20346722 5176 pcix_cmd &= 0xfffd;
1da177e4 5177 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5178 pcix_cmd);
1da177e4 5179 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 5180 &(pcix_cmd));
5181}
5182
5183MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5184MODULE_LICENSE("GPL");
5185module_param(tx_fifo_num, int, 0);
1da177e4 5186module_param(rx_ring_num, int, 0);
5187module_param_array(tx_fifo_len, uint, NULL, 0);
5188module_param_array(rx_ring_sz, uint, NULL, 0);
20346722 5189module_param_array(rts_frm_len, uint, NULL, 0);
5e25b9dd 5190module_param(use_continuous_tx_intrs, int, 1);
5191module_param(rmac_pause_time, int, 0);
5192module_param(mc_pause_threshold_q0q3, int, 0);
5193module_param(mc_pause_threshold_q4q7, int, 0);
5194module_param(shared_splits, int, 0);
5195module_param(tmac_util_period, int, 0);
5196module_param(rmac_util_period, int, 0);
b6e3f982 5197module_param(bimodal, bool, 0);
5198#ifndef CONFIG_S2IO_NAPI
5199module_param(indicate_max_pkts, int, 0);
5200#endif
303bcb4b 5201module_param(rxsync_frequency, int, 0);
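/*
 * Editor's note (illustrative only, not in the original source): the array
 * parameters above take comma-separated values at load time, e.g.
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2
 *
 * Any parameter left unset keeps the driver's built-in default.
 */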
20346722 5202
1da177e4 5203/**
20346722 5204 * s2io_init_nic - Initialization of the adapter.
5205 * @pdev : structure containing the PCI related information of the device.
5206 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5207 * Description:
5208 * The function initializes an adapter identified by the pci_dev structure.
5209 * All OS related initialization including memory and device structure and
5210 * initialization of the device private variables is done. Also the swapper
5211 * control register is initialized to enable read and write into the I/O
5212 * registers of the device.
5213 * Return value:
5214 * returns 0 on success and negative on failure.
5215 */
5216
5217static int __devinit
5218s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5219{
5220 nic_t *sp;
5221 struct net_device *dev;
5222 int i, j, ret;
5223 int dma_flag = FALSE;
5224 u32 mac_up, mac_down;
5225 u64 val64 = 0, tmp64 = 0;
5226 XENA_dev_config_t __iomem *bar0 = NULL;
5227 u16 subid;
5228 mac_info_t *mac_control;
5229 struct config_param *config;
541ae68f 5230 int mode;
1da177e4 5231
5232#ifdef CONFIG_S2IO_NAPI
5233 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5234#endif
5235
5236 if ((ret = pci_enable_device(pdev))) {
5237 DBG_PRINT(ERR_DBG,
5238 "s2io_init_nic: pci_enable_device failed\n");
5239 return ret;
5240 }
5241
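	/*
	 * Editor's note (not in the original source): prefer 64-bit DMA
	 * addressing when the platform supports it and fall back to a
	 * 32-bit mask otherwise; dma_flag is later used to advertise
	 * NETIF_F_HIGHDMA on the net device.
	 */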
1e7f0bd8 5242 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5243 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5244 dma_flag = TRUE;
1da177e4 5245 if (pci_set_consistent_dma_mask
1e7f0bd8 5246 (pdev, DMA_64BIT_MASK)) {
5247 DBG_PRINT(ERR_DBG,
5248 				  "Unable to obtain 64bit DMA "
5249 				  "for consistent allocations\n");
5250 pci_disable_device(pdev);
5251 return -ENOMEM;
5252 }
1e7f0bd8 5253 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5254 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5255 } else {
5256 pci_disable_device(pdev);
5257 return -ENOMEM;
5258 }
5259
5260 if (pci_request_regions(pdev, s2io_driver_name)) {
5261 		DBG_PRINT(ERR_DBG, "Request Regions failed\n");
5262 pci_disable_device(pdev);
5263 return -ENODEV;
5264 }
5265
5266 dev = alloc_etherdev(sizeof(nic_t));
5267 if (dev == NULL) {
5268 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5269 pci_disable_device(pdev);
5270 pci_release_regions(pdev);
5271 return -ENODEV;
5272 }
5273
5274 pci_set_master(pdev);
5275 pci_set_drvdata(pdev, dev);
5276 SET_MODULE_OWNER(dev);
5277 SET_NETDEV_DEV(dev, &pdev->dev);
5278
5279 /* Private member variable initialized to s2io NIC structure */
5280 sp = dev->priv;
5281 memset(sp, 0, sizeof(nic_t));
5282 sp->dev = dev;
5283 sp->pdev = pdev;
1da177e4 5284 sp->high_dma_flag = dma_flag;
1da177e4 5285 sp->device_enabled_once = FALSE;
1da177e4 5286
5287 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5288 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5289 sp->device_type = XFRAME_II_DEVICE;
5290 else
5291 sp->device_type = XFRAME_I_DEVICE;
5292
5293 /* Initialize some PCI/PCI-X fields of the NIC. */
5294 s2io_init_pci(sp);
5295
20346722 5296 /*
1da177e4 5297 * Setting the device configuration parameters.
5298 * Most of these parameters can be specified by the user during
5299 * module insertion as they are module loadable parameters. If
5300 * these parameters are not specified during load time, they
5301 * are initialized with default values.
5302 */
5303 mac_control = &sp->mac_control;
5304 config = &sp->config;
5305
5306 /* Tx side parameters. */
5307 if (tx_fifo_len[0] == 0)
5308 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5309 config->tx_fifo_num = tx_fifo_num;
5310 for (i = 0; i < MAX_TX_FIFOS; i++) {
5311 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5312 config->tx_cfg[i].fifo_priority = i;
5313 }
5314
5315 /* mapping the QoS priority to the configured fifos */
5316 for (i = 0; i < MAX_TX_FIFOS; i++)
5317 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5318
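	/*
	 * Editor's note (not in the original source): utilization-based Tx
	 * interrupts are kept only if every configured FIFO has at least 65
	 * descriptors; otherwise the loop below switches to per-list
	 * interrupts.
	 */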
5319 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5320 for (i = 0; i < config->tx_fifo_num; i++) {
5321 config->tx_cfg[i].f_no_snoop =
5322 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5323 if (config->tx_cfg[i].fifo_len < 65) {
5324 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5325 break;
5326 }
5327 }
5328 config->max_txds = MAX_SKB_FRAGS;
5329
5330 /* Rx side parameters. */
5331 if (rx_ring_sz[0] == 0)
5332 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5333 config->rx_ring_num = rx_ring_num;
5334 for (i = 0; i < MAX_RX_RINGS; i++) {
5335 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5336 (MAX_RXDS_PER_BLOCK + 1);
5337 config->rx_cfg[i].ring_priority = i;
5338 }
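	/*
	 * Editor's note (not in the original source): rx_ring_sz[] above is
	 * expressed in receive blocks, so each ring's descriptor count
	 * scales by MAX_RXDS_PER_BLOCK + 1.
	 */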
5339
5340 for (i = 0; i < rx_ring_num; i++) {
5341 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5342 config->rx_cfg[i].f_no_snoop =
5343 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5344 }
5345
5346 /* Setting Mac Control parameters */
5347 mac_control->rmac_pause_time = rmac_pause_time;
5348 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5349 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5350
5351
5352 /* Initialize Ring buffer parameters. */
5353 for (i = 0; i < config->rx_ring_num; i++)
5354 atomic_set(&sp->rx_bufs_left[i], 0);
5355
5356 /* Initialize the number of ISRs currently running */
5357 atomic_set(&sp->isr_cnt, 0);
5358
5359 /* initialize the shared memory used by the NIC and the host */
5360 if (init_shared_mem(sp)) {
5361 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
0b1f7ebe 5362 __FUNCTION__);
5363 ret = -ENOMEM;
5364 goto mem_alloc_failed;
5365 }
5366
5367 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5368 pci_resource_len(pdev, 0));
5369 if (!sp->bar0) {
5370 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5371 dev->name);
5372 ret = -ENOMEM;
5373 goto bar0_remap_failed;
5374 }
5375
5376 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5377 pci_resource_len(pdev, 2));
5378 if (!sp->bar1) {
5379 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5380 dev->name);
5381 ret = -ENOMEM;
5382 goto bar1_remap_failed;
5383 }
5384
5385 dev->irq = pdev->irq;
5386 dev->base_addr = (unsigned long) sp->bar0;
5387
5388 /* Initializing the BAR1 address as the start of the FIFO pointer. */
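	/*
	 * Editor's note (not in the original source): each Tx FIFO's element
	 * registers are mapped at a fixed 128 KB (0x20000) stride within
	 * BAR1, hence the j * 0x00020000 offset below.
	 */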
5389 for (j = 0; j < MAX_TX_FIFOS; j++) {
5390 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5391 (sp->bar1 + (j * 0x00020000));
5392 }
5393
5394 /* Driver entry points */
5395 dev->open = &s2io_open;
5396 dev->stop = &s2io_close;
5397 dev->hard_start_xmit = &s2io_xmit;
5398 dev->get_stats = &s2io_get_stats;
5399 dev->set_multicast_list = &s2io_set_multicast;
5400 dev->do_ioctl = &s2io_ioctl;
5401 dev->change_mtu = &s2io_change_mtu;
5402 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5403 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5404 dev->vlan_rx_register = s2io_vlan_rx_register;
5405 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
20346722 5406
5407 /*
5408 * will use eth_mac_addr() for dev->set_mac_address
5409 * mac address will be set every time dev->open() is called
5410 */
20346722 5411#if defined(CONFIG_S2IO_NAPI)
1da177e4 5412 dev->poll = s2io_poll;
20346722 5413 dev->weight = 32;
5414#endif
5415
5416 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5417 if (sp->high_dma_flag == TRUE)
5418 dev->features |= NETIF_F_HIGHDMA;
5419#ifdef NETIF_F_TSO
5420 dev->features |= NETIF_F_TSO;
5421#endif
5422
5423 dev->tx_timeout = &s2io_tx_watchdog;
5424 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5425 INIT_WORK(&sp->rst_timer_task,
5426 (void (*)(void *)) s2io_restart_nic, dev);
5427 INIT_WORK(&sp->set_link_task,
5428 (void (*)(void *)) s2io_set_link, sp);
5429
e960fc5c 5430 pci_save_state(sp->pdev);
5431
5432 /* Setting swapper control on the NIC, for proper reset operation */
5433 if (s2io_set_swapper(sp)) {
5434 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5435 dev->name);
5436 ret = -EAGAIN;
5437 goto set_swap_failed;
5438 }
5439
5440 	/* Verify if the Herc works in the slot it's placed into */
5441 if (sp->device_type & XFRAME_II_DEVICE) {
5442 mode = s2io_verify_pci_mode(sp);
5443 if (mode < 0) {
5444 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5445 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5446 ret = -EBADSLT;
5447 goto set_swap_failed;
5448 }
5449 }
5450
5451 /* Not needed for Herc */
5452 if (sp->device_type & XFRAME_I_DEVICE) {
5453 /*
5454 * Fix for all "FFs" MAC address problems observed on
5455 * Alpha platforms
5456 */
5457 fix_mac_address(sp);
5458 s2io_reset(sp);
5459 }
5460
5461 /*
5462 * MAC address initialization.
5463 * For now only one mac address will be read and used.
5464 */
5465 bar0 = sp->bar0;
5466 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5467 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5468 writeq(val64, &bar0->rmac_addr_cmd_mem);
5469 wait_for_cmd_complete(sp);
5470
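	/*
	 * Editor's note (not in the original source): the station address is
	 * returned in the upper 48 bits of rmac_addr_data0_mem; bytes 0-3 of
	 * the MAC come from the high 32-bit word and bytes 4-5 from the top
	 * of the low word, as unpacked below.
	 */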
5471 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5472 mac_down = (u32) tmp64;
5473 mac_up = (u32) (tmp64 >> 32);
5474
5475 	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
5476
5477 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5478 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5479 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5480 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5481 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5482 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5483
5484 /* Set the factory defined MAC address initially */
5485 dev->addr_len = ETH_ALEN;
5486 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5487
5488 /*
20346722 5489 * Initialize the tasklet status and link state flags
541ae68f 5490 * and the card state parameter
5491 */
5492 atomic_set(&(sp->card_state), 0);
5493 sp->tasklet_status = 0;
5494 sp->link_state = 0;
5495
5496 /* Initialize spinlocks */
5497 spin_lock_init(&sp->tx_lock);
5498#ifndef CONFIG_S2IO_NAPI
5499 spin_lock_init(&sp->put_lock);
5500#endif
7ba013ac 5501 spin_lock_init(&sp->rx_lock);
1da177e4 5502
5503 /*
5504 * SXE-002: Configure link and activity LED to init state
5505 * on driver load.
5506 */
5507 subid = sp->pdev->subsystem_device;
5508 if ((subid & 0xFF) >= 0x07) {
5509 val64 = readq(&bar0->gpio_control);
5510 val64 |= 0x0000800000000000ULL;
5511 writeq(val64, &bar0->gpio_control);
5512 val64 = 0x0411040400000000ULL;
5513 writeq(val64, (void __iomem *) bar0 + 0x2700);
5514 val64 = readq(&bar0->gpio_control);
5515 }
5516
5517 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5518
5519 if (register_netdev(dev)) {
5520 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5521 ret = -ENODEV;
5522 goto register_failed;
5523 }
5524
5525 if (sp->device_type & XFRAME_II_DEVICE) {
5526 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5527 dev->name);
5528 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5529 get_xena_rev_id(sp->pdev),
5530 s2io_driver_version);
5531 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5532 sp->def_mac_addr[0].mac_addr[0],
5533 sp->def_mac_addr[0].mac_addr[1],
5534 sp->def_mac_addr[0].mac_addr[2],
5535 sp->def_mac_addr[0].mac_addr[3],
5536 sp->def_mac_addr[0].mac_addr[4],
5537 sp->def_mac_addr[0].mac_addr[5]);
0b1f7ebe 5538 mode = s2io_print_pci_mode(sp);
5539 if (mode < 0) {
5540 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5541 ret = -EBADSLT;
5542 goto set_swap_failed;
5543 }
5544 } else {
5545 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5546 dev->name);
5547 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5548 get_xena_rev_id(sp->pdev),
5549 s2io_driver_version);
5550 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5551 sp->def_mac_addr[0].mac_addr[0],
5552 sp->def_mac_addr[0].mac_addr[1],
5553 sp->def_mac_addr[0].mac_addr[2],
5554 sp->def_mac_addr[0].mac_addr[3],
5555 sp->def_mac_addr[0].mac_addr[4],
5556 sp->def_mac_addr[0].mac_addr[5]);
5557 }
5558
5559 /* Initialize device name */
5560 strcpy(sp->name, dev->name);
5561 if (sp->device_type & XFRAME_II_DEVICE)
5562 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5563 else
5564 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
7ba013ac 5565
5566 /* Initialize bimodal Interrupts */
5567 sp->config.bimodal = bimodal;
5568 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5569 sp->config.bimodal = 0;
5570 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
5571 dev->name);
5572 }
5573
5574 /*
5575 * Make Link state as off at this point, when the Link change
5576 * interrupt comes the state will be automatically changed to
5577 * the right state.
5578 */
5579 netif_carrier_off(dev);
5580
5581 return 0;
5582
5583 register_failed:
5584 set_swap_failed:
5585 iounmap(sp->bar1);
5586 bar1_remap_failed:
5587 iounmap(sp->bar0);
5588 bar0_remap_failed:
5589 mem_alloc_failed:
5590 free_shared_mem(sp);
5591 pci_disable_device(pdev);
5592 pci_release_regions(pdev);
5593 pci_set_drvdata(pdev, NULL);
5594 free_netdev(dev);
5595
5596 return ret;
5597}
5598
5599/**
20346722 5600 * s2io_rem_nic - Free the PCI device
1da177e4 5601 * @pdev: structure containing the PCI related information of the device.
20346722 5602 * Description: This function is called by the PCI subsystem to release a
5603 * PCI device and free up all resources held by the device. This could
20346722 5604 * be in response to a Hot plug event or when the driver is to be removed
5605 * from memory.
5606 */
5607
5608static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5609{
5610 struct net_device *dev =
5611 (struct net_device *) pci_get_drvdata(pdev);
5612 nic_t *sp;
5613
5614 if (dev == NULL) {
5615 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5616 return;
5617 }
5618
5619 sp = dev->priv;
5620 unregister_netdev(dev);
5621
5622 free_shared_mem(sp);
5623 iounmap(sp->bar0);
5624 iounmap(sp->bar1);
5625 pci_disable_device(pdev);
5626 pci_release_regions(pdev);
5627 pci_set_drvdata(pdev, NULL);
5628 free_netdev(dev);
5629}
5630
5631/**
5632 * s2io_starter - Entry point for the driver
5633 * Description: This function is the entry point for the driver. It verifies
5634 * the module loadable parameters and initializes PCI configuration space.
5635 */
5636
5637int __init s2io_starter(void)
5638{
5639 return pci_module_init(&s2io_driver);
5640}
5641
5642/**
20346722 5643 * s2io_closer - Cleanup routine for the driver
5644 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5645 */
5646
20346722 5647void s2io_closer(void)
5648{
5649 pci_unregister_driver(&s2io_driver);
5650 DBG_PRINT(INIT_DBG, "cleanup done\n");
5651}
5652
5653module_init(s2io_starter);
5654module_exit(s2io_closer);