/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     1(MSI), 2(MSI_X). Default value is '0(INTA)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values '1' for enable, '0' for disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet.
 ************************************************************************/
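/*
 * Illustrative example (not part of the original source): with the
 * parameters documented above, the driver could be loaded as, e.g.:
 *
 *	modprobe s2io intr_type=2 lro=1 lro_max_pkts=32
 *
 * which selects MSI-X interrupts and enables LRO with at most 32
 * aggregated packets. The parameter names match the S2IO_PARM_INT()
 * declarations further below.
 */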

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/irq.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.16.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[4] = {32, 48, 48, 64};
static int rxd_count[4] = {127, 85, 85, 63};

static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(dev_type == XFRAME_I_DEVICE) ? \
		((((subid >= 0x600B) && (subid <= 0x600D)) || \
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
#define PANIC	1
#define LOW	2
static inline int rx_buffer_level(struct s2io_nic *sp, int rxb_size, int ring)
{
	struct mac_info *mac_control;

	mac_control = &sp->mac_control;
	if (rxb_size <= rxd_count[sp->rxd_mode])
		return PANIC;
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
		return LOW;
	return 0;
}
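/*
 * Worked example (illustrative, using the rxd_count[] table above):
 * in 1-buffer mode rxd_count[sp->rxd_mode] is 127, so rx_buffer_level()
 * reports PANIC once no more than one block's worth (127) of buffers
 * remains, LOW once more than 16 of the ring's buffers are in use, and
 * 0 otherwise.
 */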

/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"},
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
};

#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
#define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN

#define S2IO_TEST_LEN	sizeof(s2io_gstrings) / ETH_GSTRING_LEN
#define S2IO_STRINGS_LEN	S2IO_TEST_LEN * ETH_GSTRING_LEN

#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
			init_timer(&timer);		\
			timer.function = handle;	\
			timer.data = (unsigned long) arg;	\
			mod_timer(&timer, (jiffies + exp))
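/*
 * Usage sketch for S2IO_TIMER_CONF (illustrative only, compiled out;
 * the handler name below is hypothetical). The macro expands to plain
 * statements, so it must be invoked inside a function body:
 */
#if 0
static void my_timer_handler(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *)data;
	/* ... service the alarm, re-arm the timer if needed ... */
}

	/* inside e.g. the open path: fire my_timer_handler in ~HZ/2 jiffies */
	S2IO_TIMER_CONF(sp->alarm_timer, my_timer_handler, sp, (HZ / 2));
#endif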
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct s2io_nic *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	struct s2io_nic *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	if (nic->vlgrp)
		nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0};

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
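/*
 * Illustrative example (not part of the original source): the array
 * parameters above take comma-separated lists, one entry per FIFO or
 * ring, e.g.:
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2
 *
 * Entries left unspecified keep the defaults set in the initializers
 * above.
 */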

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)

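/*
 * Worked example (illustrative): TXD_MEM_PAGE_CNT rounds up. With a
 * FIFO of 1024 TxD lists and, say, 32 lists per page, it yields
 * (1024 + 32 - 1) / 32 = 32 pages; a 1025-entry FIFO would need 33.
 */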
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(ERR_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. ",
					  dev->name);
				DBG_PRINT(INIT_DBG,
					  "Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
						blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
						blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
					GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						 (rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}
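/*
 * Note on the buffer alignment arithmetic used above (illustrative,
 * compiled out; assumes ALIGN_SIZE has the form 2^n - 1 so it can act
 * as a mask, and omits error handling):
 */
#if 0
	unsigned long tmp = (unsigned long) kmalloc(len + ALIGN_SIZE,
						    GFP_KERNEL);
	tmp += ALIGN_SIZE;			/* step past the boundary */
	tmp &= ~(unsigned long) ALIGN_SIZE;	/* round down to 2^n multiple */
	/* e.g. with ALIGN_SIZE == 127: 0x1005 -> 0x1084 -> 0x1080 */
#endif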

/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function frees all the memory locations allocated by
 * the init_shared_mem() function and returns them to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	if (!nic)
		return;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		}
		kfree(mac_control->fifos[i].list_info);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
		}
	}

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					kfree(ba->ba_1_org);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
			}
			kfree(mac_control->rings[i].ba);
		}
	}

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
	if (nic->ufo_in_band_v)
		kfree(nic->ufo_in_band_v);
}

/**
 * s2io_verify_pci_mode -
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}

#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent)
				pci_dev_put(tdev);
			return 1;
		}
	}
	return 0;
}

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode -
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}

/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);


	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0; i < config->rx_ring_num; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user has
		 * not specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
				  dev->name);
			DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
			return FAILURE;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);


	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125)/2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
	 * We wait for a maximum of 500ms for the operation to complete;
	 * if it's not complete by then we return an error.
	 */
	time = 0;
	while (TRUE) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
			break;
		}
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		msleep(50);
		time++;
	}

	if (nic->config.bimodal) {
		int k = 0;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of
			 * the command register will be reset. We poll for
			 * this particular condition. We wait for a maximum
			 * of 500ms for the operation to complete; if it's
			 * not complete by then we return an error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						  "%s: TTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	} else {

		/* RTI Initialization */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/*
			 * Programmed to generate approx 500 Intrs per
			 * second
			 */
			int count = (nic->config.bus_speed * 125)/4;
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
		} else {
			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
		}
		val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		    RTI_DATA1_MEM_RX_URNG_B(0x10) |
		    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

		writeq(val64, &bar0->rti_data1_mem);

		val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		    RTI_DATA2_MEM_RX_UFC_B(0x2);
		if (nic->intr_type == MSI_X)
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
				  RTI_DATA2_MEM_RX_UFC_D(0x40));
		else
			val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
				  RTI_DATA2_MEM_RX_UFC_D(0x80));
		writeq(val64, &bar0->rti_data2_mem);

		for (i = 0; i < config->rx_ring_num; i++) {
			val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
					| RTI_CMD_MEM_OFFSET(i);
			writeq(val64, &bar0->rti_command_mem);

			/*
			 * Once the operation completes, the Strobe bit of the
			 * command register will be reset. We poll for this
			 * particular condition. We wait for a maximum of 500ms
			 * for the operation to complete; if it's not complete
			 * by then we return an error.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->rti_command_mem);
				if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
					break;
				}
				if (time > 10) {
					DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
	 * a pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
		     << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop the Read request if the number of read splits
	 * has exceeded the limit pointed to by shared_splits.
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(BIT(13)|BIT(14)|BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
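/*
 * The TTI/RTI strobe-poll loops in init_nic() above repeat the same
 * pattern three times. A factored-out helper could look like the sketch
 * below (illustrative only, compiled out; s2io_poll_cmd_strobe is a
 * hypothetical name, not a function in this driver):
 */
#if 0
static int s2io_poll_cmd_strobe(void __iomem *cmd_mem, u64 strobe_bit)
{
	u32 time = 0;

	/* poll up to ~500ms (10 x 50ms) for the strobe bit to clear */
	while (readq(cmd_mem) & strobe_bit) {
		if (time++ > 10)
			return -1;
		msleep(50);
	}
	return 0;
}
#endif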
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}

/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If it is a Hercules adapter enable GPIO,
			 * otherwise disable all PCIX, Flash, MDIO, IIC
			 * and GPIO interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now.
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts;
			 * writing 0 enables all 64 Tx interrupt levels.
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 enables all 8 Rx interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}

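/*
 * Usage sketch, illustrative rather than lifted from the driver: callers
 * OR together the block masks and pass ENABLE_INTRS or DISABLE_INTRS,
 * e.g. to turn on only the traffic interrupts:
 *
 *	u16 intr_mask = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
 *	en_dis_able_nic_intrs(nic, intr_mask, ENABLE_INTRS);
 */
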
/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * Return: 1 if the PCC is quiescent,
 *         0 if the PCC is not quiescent.
 */
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
{
	int ret = 0, herc;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	herc = (sp->device_type == XFRAME_II_DEVICE);

	if (flag == FALSE) {
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	} else {
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	}

	return ret;
}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether the adapter enable bit was written or not, the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 if Xena is quiescent,
 *         0 if Xena is not quiescent.
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE && mode !=
	    PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
		return 0;
	}
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
		return 0;
	}
	return 1;
}

/**
 * fix_mac_address - Fix for MAC addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description :
 * New procedure to clear MAC address reading problems on Alpha platforms
 *
 */

static void fix_mac_address(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int i = 0;

	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		udelay(10);
		val64 = readq(&bar0->gpio_control);
	}
}

/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled; if so, enable it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable the laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some
		 * switches, so directly schedule the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txds->Buffer_Pointer, sizeof(u64),
				 PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *) ((unsigned long)
				  txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
				       txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}

/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct TxD *txdp;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int cnt = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (struct TxD *) mac_control->fifos[i].
			    list_info[j].list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
	}
}

/**
 * stop_nic - To stop the nic
 * @nic : device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
 * Return Value:
 * void.
 */

static void stop_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Disable all interrupts */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR | RX_PIC_INTR;
	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~(ADAPTER_CNTL_EN);
	writeq(val64, &bar0->adapter_control);
}

static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp,
			 struct sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	((struct RxD3 *)rxdp)->Buffer1_ptr = pci_map_single
	    (nic->pdev, skb->data, l3l4hdr_size + 4,
	     PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", dev->name);
		return -ENOMEM;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	skb->truesize += frag_list->truesize;
	frag_list->next = NULL;
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	frag_list->tail = tmp;

	/* Buffer-2 receives L4 data payload */
	((struct RxD3 *)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
			frag_list->data, dev->mtu,
			PCI_DMA_FROMDEVICE);
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}

/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring_no: ring number
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 header and
 * L4 payload in three buffer mode, and in 5 buffer mode the L4 payload
 * itself is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
		    rx_blocks[block_no].rxds[off].virt_addr;

		if ((block_no == block_no1) && (off == off1) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
			    block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
				    block_index = 0;
			block_no = mac_control->rings[ring_no].
			    rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			rxdp = mac_control->rings[ring_no].
			    rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		if (!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((nic->rxd_mode >= RXD_MODE_3A) &&
		     (rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			((struct RxD1 *)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
			     PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provide 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation, wherein
			 * skb->data will have the L3/L4 headers whereas
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload.
			 */

			memset(rxdp, 0, sizeof(struct RxD3));
			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb->tail = (void *) (unsigned long)tmp;

			if (!(((struct RxD3 *)rxdp)->Buffer0_ptr))
				((struct RxD3 *)rxdp)->Buffer0_ptr =
				    pci_map_single(nic->pdev, ba->ba_0,
						   BUF0_LEN,
						   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				    (dma_addr_t) ((struct RxD3 *)rxdp)->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				((struct RxD3 *)rxdp)->Buffer2_ptr = pci_map_single
				    (nic->pdev, skb->data, dev->mtu + 4,
				     PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be dummy buffer. Not used */
				if (!(((struct RxD3 *)rxdp)->Buffer1_ptr)) {
					((struct RxD3 *)rxdp)->Buffer1_ptr =
					    pci_map_single(nic->pdev,
							   ba->ba_1, BUF1_LEN,
							   PCI_DMA_FROMDEVICE);
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
				    (dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
						    RXD_OWN_XENA;
					}
					return -ENOMEM;
				}
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

end:
	/*
	 * Transfer ownership of the first descriptor to the adapter just
	 * before exiting. Before that, use a memory barrier so that
	 * ownership and the other fields are seen by the adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}

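/*
 * Illustrative sketch, not part of the driver: the skb->data alignment
 * in fill_rx_buffers() above relies on ALIGN_SIZE being one less than a
 * power of two, so that adding it and then masking it off rounds the
 * address up to the next aligned boundary. A hypothetical helper (the
 * name is ours) showing the same trick:
 */
static inline unsigned long s2io_align_up(unsigned long addr,
					  unsigned long align_minus_one)
{
	/* e.g. s2io_align_up(0x1001, 127) == 0x1080 (128-byte aligned) */
	return (addr + align_minus_one) & ~align_minus_one;
}
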
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;

	mac_control = &sp->mac_control;
	for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
		    rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
		    ((unsigned long) rxdp->Host_Control);
		if (!skb)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD1 *)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE
				 + HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			ba = &mac_control->rings[ring_no].
			    ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer0_ptr,
				 BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer1_ptr,
				 BUF1_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		} else {
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer0_ptr, BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer1_ptr,
				 l3l4hdr_size + 4,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer2_ptr, dev->mtu,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}

/**
 * free_rx_buffers - Frees all Rx buffers
 * @sp: device private variable.
 * Description:
 * This function will free all Rx buffers allocated by host.
 * Return Value:
 * NONE.
 */

static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, blk = 0, buf_cnt = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp, i, blk);

		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		atomic_set(&sp->rx_bufs_left[i], 0);
		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}

/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into the picture only if NAPI support has been incorporated. It
 * does the same thing that rx_intr_handler does, but not in an interrupt
 * context; also it will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are no Rx packets to be processed.
 */

static int s2io_poll(struct net_device *dev, int *budget)
{
	struct s2io_nic *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re-enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by the upper layer to check for events on
 * the interface in situations where interrupts are disabled. It is used
 * for specific in-kernel networking tasks, such as remote consoles and
 * kernel debugging over the network (for example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/*
	 * We need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail, and eventually a netpoll
	 * application such as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packets and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
	return;
}
#endif

/**
 * rx_intr_handler - Rx interrupt handler
 * @nic: device private variable.
 * Description:
 * If the interrupt is because of a received frame, or if the
 * receive ring contains fresh as yet un-processed frames, this function
 * is called. It picks out the RxD at which the last Rx processing
 * stopped and sends the skb to the OSM's Rx handler and then increments
 * the offset.
 * Return Value:
 * NONE.
 */
static void rx_intr_handler(struct ring_info *ring_data)
{
	struct s2io_nic *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;

	spin_lock(&nic->rx_lock);
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	if (!napi) {
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;

	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index then it's a
		 * FIFO full condition.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n", dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD1 *)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer0_ptr,
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
		} else {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer0_ptr, BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer1_ptr,
				 l3l4hdr_size + 4,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3 *)rxdp)->Buffer2_ptr,
				 dev->mtu, PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
		    rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}

/**
 * tx_intr_handler - Transmit interrupt handler
 * @nic : device private variable
 * Description:
 * If an interrupt was raised to indicate DMA complete of the
 * Tx packet, this function is called. It identifies the last TxD
 * whose buffer was freed and frees all skbs whose data have already
 * been DMA'ed into the NIC's internal memory.
 * Return Value:
 * NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb;
	struct TxD *txdlp;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
				    parity_err_cnt++;
			}
			if ((err >> 48) == 0xA) {
				DBG_PRINT(TX_DBG, "TxD returned due "
					  "to loss of link\n");
			} else {
				DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
				  __FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}

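/*
 * Illustrative sketch, not part of the driver: TXD_T_CODE masks the
 * transfer-code field out of Control_1, and tx_intr_handler() above
 * shifts the masked value down by 48 bits before comparing it with the
 * code 0xA (TxD returned because of link loss). A hypothetical helper
 * (the name is ours) making that extraction explicit:
 */
static inline u8 s2io_txd_t_code(u64 control_1)
{
	/* the 4-bit t_code occupies bits 48-51 of Control_1 */
	return (u8)((control_1 & TXD_T_CODE) >> 48);
}
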
/**
 * s2io_mdio_write - Function to write in to MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @value : data value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Return Value:
 * NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
	u64 val64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_MDIO_DATA(value)
	    | MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}

/**
 * s2io_mdio_read - Function to read from the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Return Value:
 * The 16-bit value read back from the register.
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
	    | MDIO_MMD_DEV_ADDR(mmd_type)
	    | MDIO_MMS_PRT_ADDR(0x0)
	    | MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from the registers */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
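
/*
 * Usage sketch, illustrative rather than lifted from the driver: this
 * mirrors the probe that s2io_updt_xpak_counter() performs below, where
 * a read of 0xFFFF or 0x0000 from PMA address 0x0000 means the MDIO
 * slave is not responding:
 *
 *	u64 id = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, 0x0000, dev);
 *	if ((id == 0xFFFF) || (id == 0x0000))
 *		return;
 */
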
/**
 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
 * @counter : counter value to be updated
 * @regs_stat : per-alarm 2-bit state word kept across calls
 * @index : index of the 2-bit field in @regs_stat for this counter
 * @flag : flag to indicate the status
 * @type : counter type
 * Description:
 * This function is to check the status of the xpak counters value.
 * Return Value:
 * NONE
 */

static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index, u16 flag, u16 type)
{
	u64 mask = 0x3;
	u64 val64;
	int i;
	for (i = 0; i < index; i++)
		mask = mask << 0x2;

	if (flag > 0) {
		*counter = *counter + 1;
		val64 = *regs_stat & mask;
		val64 = val64 >> (index * 0x2);
		val64 = val64 + 1;
		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive temperatures may "
					  "result in premature transceiver "
					  "failure\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive bias currents may "
					  "indicate imminent laser diode "
					  "failure\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive laser output "
					  "power may saturate far-end "
					  "receiver\n");
				break;
			default:
				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
					  "type\n");
			}
			val64 = 0x0;
		}
		val64 = val64 << (index * 0x2);
		*regs_stat = (*regs_stat & (~mask)) | (val64);

	} else {
		*regs_stat = *regs_stat & (~mask);
	}
}
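
/*
 * Illustrative sketch, not part of the driver: each alarm tracked by
 * s2io_chk_xpak_counter() owns a 2-bit saturating field inside
 * *regs_stat, so a single u64 can hold the state of up to 32 alarms.
 * A hypothetical helper (the name is ours) extracting one field:
 */
static inline u64 s2io_xpak_field(u64 regs_stat, u32 index)
{
	/* returns 0..3: consecutive polls this alarm has been raised */
	return (regs_stat >> (index * 2)) & 0x3;
}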

/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value.
 * Return Value:
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if (val64 != 0x2040) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx - Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if (CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}

/**
 * alarm_intr_handler - Alarm Interrupt handler
 * @nic: device private variable
 * Description: If the interrupt was neither because of an Rx packet nor a
 * Tx completion, this function is called. If the interrupt was to indicate
 * a loss of link, the OSM link status handler is invoked; for any other
 * alarm interrupt the block that raised the interrupt is displayed
 * and a H/W reset is issued.
 * Return Value:
 * NONE
 */

static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;
	u64 cnt;
	int i;
	if (atomic_read(&nic->card_state) == CARD_DOWN)
		return;
	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
	/* Handling the XPAK counters update */
	if (nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
		/* waiting for an hour */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling ECC errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
			    double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
					nic->mac_control.stats_info->sw_stat.
					    soft_reset_cnt++;
				}
			}
		} else {
			nic->mac_control.stats_info->sw_stat.
			    single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}

	/*
	 * Also, as mentioned in the latest Errata sheets, if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}
	/* Check for data parity error */
	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
			schedule_work(&nic->rst_timer_task);
			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
		}
	}

	/* Check for ring full counter */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i = 0; i < 4; i++) {
			cnt = (val64 & vBIT(0xFFFF, (i*16), 16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
			    += cnt;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i = 0; i < 4; i++) {
			cnt = (val64 & vBIT(0xFFFF, (i*16), 16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
			    += cnt;
		}
	}

	/* Other types of interrupts are not being handled now, TODO */
}

/**
 * wait_for_cmd_complete - waits for a command to complete.
 * @addr : I/O mapped address of the register to poll.
 * @busy_bit : bit(s) in the register that indicate the command is busy.
 * @bit_state : S2IO_BIT_RESET to wait for @busy_bit to clear,
 * S2IO_BIT_SET to wait for it to be set.
 * Description: Function that waits for a command to Write into RMAC
 * ADDR DATA registers to be completed and returns either success or
 * error depending on whether the command was complete or not.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
				 int bit_state)
{
	int ret = FAILURE, cnt = 0, delay = 1;
	u64 val64;

	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
		return FAILURE;

	do {
		val64 = readq(addr);
		if (bit_state == S2IO_BIT_RESET) {
			if (!(val64 & busy_bit)) {
				ret = SUCCESS;
				break;
			}
		} else {
			if (val64 & busy_bit) {
				ret = SUCCESS;
				break;
			}
		}

		if (in_interrupt())
			mdelay(delay);
		else
			msleep(delay);

		if (++cnt >= 10)
			delay = 50;
	} while (cnt < 20);
	return ret;
}
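
/*
 * Usage sketch, illustrative rather than lifted from the driver: poll a
 * command register until the hardware clears its busy bit. The first
 * ten polls back off by 1 ms each and the remaining ten by 50 ms, so
 * the total wait is bounded at roughly 10 * 1 + 10 * 50 = 510 ms. The
 * register and strobe names below are assumptions about the driver's
 * headers, not taken from this section:
 *
 *	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
 *				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
 *				  S2IO_BIT_RESET) != SUCCESS)
 *		return FAILURE;
 */
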
/*
 * check_pci_device_id - Checks if the device id is supported
 * @id : device id
 * Description: Function to check if the pci device id is supported by driver.
 * Return value: The device type (XFRAME_I_DEVICE or XFRAME_II_DEVICE) if
 * supported, else PCI_ANY_ID.
 */
static u16 check_pci_device_id(u16 id)
{
	switch (id) {
	case PCI_DEVICE_ID_HERC_WIN:
	case PCI_DEVICE_ID_HERC_UNI:
		return XFRAME_II_DEVICE;
	case PCI_DEVICE_ID_S2IO_UNI:
	case PCI_DEVICE_ID_S2IO_WIN:
		return XFRAME_I_DEVICE;
	default:
		return PCI_ANY_ID;
	}
}
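
/*
 * Usage sketch, illustrative rather than lifted from the driver: this is
 * how s2io_reset() below decides that config space has recovered after a
 * software reset, by re-reading the device id at offset 0x2:
 *
 *	pci_read_config_word(sp->pdev, 0x2, &val16);
 *	if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
 *		break;
 */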
1da177e4 3260
20346722
K
3261/**
3262 * s2io_reset - Resets the card.
1da177e4
LT
3263 * @sp : private member of the device structure.
3264 * Description: Function to Reset the card. This function then also
20346722 3265 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3266 * the card reset also resets the configuration space.
3267 * Return value:
3268 * void.
3269 */
3270
1ee6dd77 3271static void s2io_reset(struct s2io_nic * sp)
1da177e4 3272{
1ee6dd77 3273 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 3274 u64 val64;
5e25b9dd 3275 u16 subid, pci_cmd;
19a60522
SS
3276 int i;
3277 u16 val16;
3278 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3279 __FUNCTION__, sp->dev->name);
1da177e4 3280
0b1f7ebe 3281 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
e960fc5c 3282 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
0b1f7ebe 3283
19a60522
SS
3284 if (sp->device_type == XFRAME_II_DEVICE) {
3285 int ret;
3286 ret = pci_set_power_state(sp->pdev, 3);
3287 if (!ret)
3288 ret = pci_set_power_state(sp->pdev, 0);
3289 else {
3290 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3291 __FUNCTION__);
3292 goto old_way;
3293 }
3294 msleep(20);
3295 goto new_way;
3296 }
3297old_way:
1da177e4
LT
3298 val64 = SW_RESET_ALL;
3299 writeq(val64, &bar0->sw_reset);
19a60522 3300new_way:
c92ca04b
AR
3301 if (strstr(sp->product_name, "CX4")) {
3302 msleep(750);
3303 }
19a60522
SS
3304 msleep(250);
3305 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
1da177e4 3306
19a60522
SS
3307 /* Restore the PCI state saved during initialization. */
3308 pci_restore_state(sp->pdev);
3309 pci_read_config_word(sp->pdev, 0x2, &val16);
3310 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3311 break;
3312 msleep(200);
3313 }
1da177e4 3314
19a60522
SS
3315 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3316 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3317 }
3318
3319 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3320
3321 s2io_init_pci(sp);
1da177e4 3322
20346722
K
3323 /* Set swapper to enable I/O register access */
3324 s2io_set_swapper(sp);
3325
cc6e7c44
RA
3326 /* Restore the MSIX table entries from local variables */
3327 restore_xmsi_data(sp);
3328
5e25b9dd 3329 /* Clear certain PCI/PCI-X fields after reset */
303bcb4b 3330 if (sp->device_type == XFRAME_II_DEVICE) {
b41477f3 3331 /* Clear "detected parity error" bit */
303bcb4b 3332 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
5e25b9dd 3333
303bcb4b
K
3334 /* Clearing PCIX Ecc status register */
3335 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
5e25b9dd 3336
303bcb4b
K
3337 /* Clearing PCI_STATUS error reflected here */
3338 writeq(BIT(62), &bar0->txpic_int_reg);
3339 }
5e25b9dd 3340
20346722
K
3341 /* Reset device statistics maintained by OS */
3342 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3343
1da177e4
LT
3344 /* SXE-002: Configure link and activity LED to turn it off */
3345 subid = sp->pdev->subsystem_device;
541ae68f
K
3346 if (((subid & 0xFF) >= 0x07) &&
3347 (sp->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
3348 val64 = readq(&bar0->gpio_control);
3349 val64 |= 0x0000800000000000ULL;
3350 writeq(val64, &bar0->gpio_control);
3351 val64 = 0x0411040400000000ULL;
509a2671 3352 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
3353 }
3354
541ae68f
K
3355 /*
 3356 * Clear spurious ECC interrupts that would have occurred on
3357 * XFRAME II cards after reset.
3358 */
3359 if (sp->device_type == XFRAME_II_DEVICE) {
3360 val64 = readq(&bar0->pcc_err_reg);
3361 writeq(val64, &bar0->pcc_err_reg);
3362 }
3363
1da177e4
LT
3364 sp->device_enabled_once = FALSE;
3365}
3366
3367/**
20346722
K
 3368 * s2io_set_swapper - to set the swapper control on the card
3369 * @sp : private member of the device structure,
1da177e4 3370 * pointer to the s2io_nic structure.
20346722 3371 * Description: Function to set the swapper control on the card
1da177e4
LT
3372 * correctly depending on the 'endianness' of the system.
3373 * Return value:
3374 * SUCCESS on success and FAILURE on failure.
3375 */
3376
1ee6dd77 3377static int s2io_set_swapper(struct s2io_nic * sp)
1da177e4
LT
3378{
3379 struct net_device *dev = sp->dev;
1ee6dd77 3380 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
3381 u64 val64, valt, valr;
3382
20346722 3383 /*
1da177e4
LT
3384 * Set proper endian settings and verify the same by reading
3385 * the PIF Feed-back register.
3386 */
3387
3388 val64 = readq(&bar0->pif_rd_swapper_fb);
3389 if (val64 != 0x0123456789ABCDEFULL) {
3390 int i = 0;
3391 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3392 0x8100008181000081ULL, /* FE=1, SE=0 */
3393 0x4200004242000042ULL, /* FE=0, SE=1 */
3394 0}; /* FE=0, SE=0 */
3395
3396 while(i<4) {
3397 writeq(value[i], &bar0->swapper_ctrl);
3398 val64 = readq(&bar0->pif_rd_swapper_fb);
3399 if (val64 == 0x0123456789ABCDEFULL)
3400 break;
3401 i++;
3402 }
3403 if (i == 4) {
3404 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3405 dev->name);
3406 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3407 (unsigned long long) val64);
3408 return FAILURE;
3409 }
3410 valr = value[i];
3411 } else {
3412 valr = readq(&bar0->swapper_ctrl);
3413 }
3414
3415 valt = 0x0123456789ABCDEFULL;
3416 writeq(valt, &bar0->xmsi_address);
3417 val64 = readq(&bar0->xmsi_address);
3418
3419 if(val64 != valt) {
3420 int i = 0;
3421 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3422 0x0081810000818100ULL, /* FE=1, SE=0 */
3423 0x0042420000424200ULL, /* FE=0, SE=1 */
3424 0}; /* FE=0, SE=0 */
3425
3426 while(i<4) {
3427 writeq((value[i] | valr), &bar0->swapper_ctrl);
3428 writeq(valt, &bar0->xmsi_address);
3429 val64 = readq(&bar0->xmsi_address);
3430 if(val64 == valt)
3431 break;
3432 i++;
3433 }
3434 if(i == 4) {
20346722 3435 unsigned long long x = val64;
1da177e4 3436 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
20346722 3437 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
1da177e4
LT
3438 return FAILURE;
3439 }
3440 }
3441 val64 = readq(&bar0->swapper_ctrl);
3442 val64 &= 0xFFFF000000000000ULL;
3443
3444#ifdef __BIG_ENDIAN
20346722
K
3445 /*
 3446 * The device is set to a big endian format by default, so a
1da177e4
LT
3447 * big endian driver need not set anything.
3448 */
3449 val64 |= (SWAPPER_CTRL_TXP_FE |
3450 SWAPPER_CTRL_TXP_SE |
3451 SWAPPER_CTRL_TXD_R_FE |
3452 SWAPPER_CTRL_TXD_W_FE |
3453 SWAPPER_CTRL_TXF_R_FE |
3454 SWAPPER_CTRL_RXD_R_FE |
3455 SWAPPER_CTRL_RXD_W_FE |
3456 SWAPPER_CTRL_RXF_W_FE |
3457 SWAPPER_CTRL_XMSI_FE |
1da177e4 3458 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
92383340 3459 if (sp->intr_type == INTA)
cc6e7c44 3460 val64 |= SWAPPER_CTRL_XMSI_SE;
1da177e4
LT
3461 writeq(val64, &bar0->swapper_ctrl);
3462#else
20346722 3463 /*
1da177e4 3464 * Initially we enable all bits to make it accessible by the
20346722 3465 * driver, then we selectively enable only those bits that
1da177e4
LT
3466 * we want to set.
3467 */
3468 val64 |= (SWAPPER_CTRL_TXP_FE |
3469 SWAPPER_CTRL_TXP_SE |
3470 SWAPPER_CTRL_TXD_R_FE |
3471 SWAPPER_CTRL_TXD_R_SE |
3472 SWAPPER_CTRL_TXD_W_FE |
3473 SWAPPER_CTRL_TXD_W_SE |
3474 SWAPPER_CTRL_TXF_R_FE |
3475 SWAPPER_CTRL_RXD_R_FE |
3476 SWAPPER_CTRL_RXD_R_SE |
3477 SWAPPER_CTRL_RXD_W_FE |
3478 SWAPPER_CTRL_RXD_W_SE |
3479 SWAPPER_CTRL_RXF_W_FE |
3480 SWAPPER_CTRL_XMSI_FE |
1da177e4 3481 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
cc6e7c44
RA
3482 if (sp->intr_type == INTA)
3483 val64 |= SWAPPER_CTRL_XMSI_SE;
1da177e4
LT
3484 writeq(val64, &bar0->swapper_ctrl);
3485#endif
3486 val64 = readq(&bar0->swapper_ctrl);
3487
20346722
K
3488 /*
3489 * Verifying if endian settings are accurate by reading a
1da177e4
LT
3490 * feedback register.
3491 */
3492 val64 = readq(&bar0->pif_rd_swapper_fb);
3493 if (val64 != 0x0123456789ABCDEFULL) {
3494 /* Endian settings are incorrect, calls for another dekko. */
3495 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3496 dev->name);
3497 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3498 (unsigned long long) val64);
3499 return FAILURE;
3500 }
3501
3502 return SUCCESS;
3503}
3504
1ee6dd77 3505static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3506{
1ee6dd77 3507 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3508 u64 val64;
3509 int ret = 0, cnt = 0;
3510
3511 do {
3512 val64 = readq(&bar0->xmsi_access);
3513 if (!(val64 & BIT(15)))
3514 break;
3515 mdelay(1);
3516 cnt++;
3517 } while(cnt < 5);
3518 if (cnt == 5) {
3519 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3520 ret = 1;
3521 }
3522
3523 return ret;
3524}
3525
1ee6dd77 3526static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3527{
1ee6dd77 3528 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3529 u64 val64;
3530 int i;
3531
75c30b13 3532 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
cc6e7c44
RA
3533 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3534 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
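 /* Presumably BIT(15) strobes the access, BIT(7) marks it as a
  * write, and vBIT(i, 26, 6) selects MSI-X table entry i; the
  * exact bit layout here is an assumption. */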
3535 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3536 writeq(val64, &bar0->xmsi_access);
3537 if (wait_for_msix_trans(nic, i)) {
3538 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3539 continue;
3540 }
3541 }
3542}
3543
1ee6dd77 3544static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3545{
1ee6dd77 3546 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3547 u64 val64, addr, data;
3548 int i;
3549
3550 /* Store and display */
75c30b13 3551 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
cc6e7c44
RA
3552 val64 = (BIT(15) | vBIT(i, 26, 6));
3553 writeq(val64, &bar0->xmsi_access);
3554 if (wait_for_msix_trans(nic, i)) {
3555 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3556 continue;
3557 }
3558 addr = readq(&bar0->xmsi_address);
3559 data = readq(&bar0->xmsi_data);
3560 if (addr && data) {
3561 nic->msix_info[i].addr = addr;
3562 nic->msix_info[i].data = data;
3563 }
3564 }
3565}
3566
1ee6dd77 3567int s2io_enable_msi(struct s2io_nic *nic)
cc6e7c44 3568{
1ee6dd77 3569 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3570 u16 msi_ctrl, msg_val;
3571 struct config_param *config = &nic->config;
3572 struct net_device *dev = nic->dev;
3573 u64 val64, tx_mat, rx_mat;
3574 int i, err;
3575
3576 val64 = readq(&bar0->pic_control);
3577 val64 &= ~BIT(1);
3578 writeq(val64, &bar0->pic_control);
3579
3580 err = pci_enable_msi(nic->pdev);
3581 if (err) {
3582 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3583 nic->dev->name);
3584 return err;
3585 }
3586
3587 /*
 3588 * Enable MSI and use MSI-1 instead of the standard MSI-0
3589 * for interrupt handling.
3590 */
3591 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3592 msg_val ^= 0x1;
3593 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3594 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3595
3596 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3597 msi_ctrl |= 0x10;
3598 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3599
3600 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3601 tx_mat = readq(&bar0->tx_mat0_n[0]);
3602 for (i=0; i<config->tx_fifo_num; i++) {
3603 tx_mat |= TX_MAT_SET(i, 1);
3604 }
3605 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3606
3607 rx_mat = readq(&bar0->rx_mat);
3608 for (i=0; i<config->rx_ring_num; i++) {
3609 rx_mat |= RX_MAT_SET(i, 1);
3610 }
3611 writeq(rx_mat, &bar0->rx_mat);
3612
3613 dev->irq = nic->pdev->irq;
3614 return 0;
3615}
3616
1ee6dd77 3617static int s2io_enable_msi_x(struct s2io_nic *nic)
cc6e7c44 3618{
1ee6dd77 3619 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3620 u64 tx_mat, rx_mat;
3621 u16 msi_control; /* Temp variable */
3622 int ret, i, j, msix_indx = 1;
3623
3624 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3625 GFP_KERNEL);
3626 if (nic->entries == NULL) {
3627 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3628 return -ENOMEM;
3629 }
3630 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3631
3632 nic->s2io_entries =
3633 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3634 GFP_KERNEL);
3635 if (nic->s2io_entries == NULL) {
3636 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3637 kfree(nic->entries);
3638 return -ENOMEM;
3639 }
3640 memset(nic->s2io_entries, 0,
3641 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3642
3643 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3644 nic->entries[i].entry = i;
3645 nic->s2io_entries[i].entry = i;
3646 nic->s2io_entries[i].arg = NULL;
3647 nic->s2io_entries[i].in_use = 0;
3648 }
3649
3650 tx_mat = readq(&bar0->tx_mat0_n[0]);
3651 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3652 tx_mat |= TX_MAT_SET(i, msix_indx);
3653 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3654 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3655 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3656 }
3657 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3658
3659 if (!nic->config.bimodal) {
3660 rx_mat = readq(&bar0->rx_mat);
3661 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3662 rx_mat |= RX_MAT_SET(j, msix_indx);
3663 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3664 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3665 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3666 }
3667 writeq(rx_mat, &bar0->rx_mat);
3668 } else {
3669 tx_mat = readq(&bar0->tx_mat0_n[7]);
3670 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3671 tx_mat |= TX_MAT_SET(i, msix_indx);
3672 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3673 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3674 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3675 }
3676 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3677 }
3678
c92ca04b 3679 nic->avail_msix_vectors = 0;
cc6e7c44 3680 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
c92ca04b
AR
3681 /* We fail init if error or we get less vectors than min required */
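 /* Note: with this era's API a positive return from pci_enable_msix()
  * is the number of vectors actually available, so the request is
  * retried below with that count; the minimum required is one vector
  * per Tx FIFO plus one per Rx ring plus one. */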
3682 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3683 nic->avail_msix_vectors = ret;
3684 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3685 }
cc6e7c44
RA
3686 if (ret) {
3687 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3688 kfree(nic->entries);
3689 kfree(nic->s2io_entries);
3690 nic->entries = NULL;
3691 nic->s2io_entries = NULL;
c92ca04b 3692 nic->avail_msix_vectors = 0;
cc6e7c44
RA
3693 return -ENOMEM;
3694 }
c92ca04b
AR
3695 if (!nic->avail_msix_vectors)
3696 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
cc6e7c44
RA
3697
3698 /*
3699 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3700 * in the herc NIC. (Temp change, needs to be removed later)
3701 */
3702 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3703 msi_control |= 0x1; /* Enable MSI */
3704 pci_write_config_word(nic->pdev, 0x42, msi_control);
3705
3706 return 0;
3707}
3708
1da177e4
LT
3709/* ********************************************************* *
3710 * Functions defined below concern the OS part of the driver *
3711 * ********************************************************* */
3712
20346722 3713/**
1da177e4
LT
3714 * s2io_open - open entry point of the driver
3715 * @dev : pointer to the device structure.
3716 * Description:
3717 * This function is the open entry point of the driver. It mainly calls a
3718 * function to allocate Rx buffers and inserts them into the buffer
20346722 3719 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3720 * Return value:
3721 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3722 * file on failure.
3723 */
3724
ac1f60db 3725static int s2io_open(struct net_device *dev)
1da177e4 3726{
1ee6dd77 3727 struct s2io_nic *sp = dev->priv;
1da177e4
LT
3728 int err = 0;
3729
20346722
K
3730 /*
3731 * Make sure you have link off by default every time
1da177e4
LT
3732 * Nic is initialized
3733 */
3734 netif_carrier_off(dev);
0b1f7ebe 3735 sp->last_link_state = 0;
1da177e4
LT
3736
3737 /* Initialize H/W and enable interrupts */
c92ca04b
AR
3738 err = s2io_card_up(sp);
3739 if (err) {
1da177e4
LT
3740 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3741 dev->name);
e6a8fee2 3742 goto hw_init_failed;
1da177e4
LT
3743 }
3744
3745 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3746 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
e6a8fee2 3747 s2io_card_down(sp);
20346722 3748 err = -ENODEV;
e6a8fee2 3749 goto hw_init_failed;
1da177e4
LT
3750 }
3751
3752 netif_start_queue(dev);
3753 return 0;
20346722 3754
20346722 3755hw_init_failed:
cc6e7c44
RA
3756 if (sp->intr_type == MSI_X) {
3757 if (sp->entries)
3758 kfree(sp->entries);
3759 if (sp->s2io_entries)
3760 kfree(sp->s2io_entries);
3761 }
20346722 3762 return err;
1da177e4
LT
3763}
3764
3765/**
3766 * s2io_close -close entry point of the driver
3767 * @dev : device pointer.
3768 * Description:
 3769 * This is the stop entry point of the driver. It needs to undo exactly
 3770 * whatever was done by the open entry point, thus it's usually referred to
 3771 * as the close function. Among other things this function mainly stops the
 3772 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3773 * Return value:
3774 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3775 * file on failure.
3776 */
3777
ac1f60db 3778static int s2io_close(struct net_device *dev)
1da177e4 3779{
1ee6dd77 3780 struct s2io_nic *sp = dev->priv;
cc6e7c44 3781
1da177e4
LT
3782 netif_stop_queue(dev);
3783 /* Reset card, kill tasklet and free Tx and Rx buffers. */
e6a8fee2 3784 s2io_card_down(sp);
cc6e7c44 3785
1da177e4
LT
3786 sp->device_close_flag = TRUE; /* Device is shut down. */
3787 return 0;
3788}
3789
3790/**
 3791 * s2io_xmit - Tx entry point of the driver
3792 * @skb : the socket buffer containing the Tx data.
3793 * @dev : device pointer.
3794 * Description :
3795 * This function is the Tx entry point of the driver. S2IO NIC supports
3796 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 3797 * NOTE: when the device can't queue the pkt, just the trans_start variable
 3798 * will not be updated.
3799 * Return value:
3800 * 0 on success & 1 on failure.
3801 */
3802
ac1f60db 3803static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 3804{
1ee6dd77 3805 struct s2io_nic *sp = dev->priv;
1da177e4
LT
3806 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3807 register u64 val64;
1ee6dd77
RB
3808 struct TxD *txdp;
3809 struct TxFIFO_element __iomem *tx_fifo;
1da177e4 3810 unsigned long flags;
be3a6b02
K
3811 u16 vlan_tag = 0;
3812 int vlan_priority = 0;
1ee6dd77 3813 struct mac_info *mac_control;
1da177e4 3814 struct config_param *config;
75c30b13 3815 int offload_type;
1da177e4
LT
3816
3817 mac_control = &sp->mac_control;
3818 config = &sp->config;
3819
20346722 3820 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3821 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3822 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3823 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4
LT
3824 dev->name);
3825 spin_unlock_irqrestore(&sp->tx_lock, flags);
20346722
K
3826 dev_kfree_skb(skb);
3827 return 0;
1da177e4
LT
3828 }
3829
3830 queue = 0;
1da177e4 3831
be3a6b02
K
3832 /* Get Fifo number to Transmit based on vlan priority */
3833 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3834 vlan_tag = vlan_tx_tag_get(skb);
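 /* The top three bits (15:13) of the VLAN TCI carry the 802.1p
  * priority, hence the shift by 13 below. */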
3835 vlan_priority = vlan_tag >> 13;
3836 queue = config->fifo_mapping[vlan_priority];
3837 }
3838
20346722
K
3839 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3840 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
1ee6dd77 3841 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
20346722
K
3842 list_virt_addr;
3843
3844 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
1da177e4 3845 /* Avoid "put" pointer going beyond "get" pointer */
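 /* For example, with queue_len == 8 and put_off == 7 the next put
  * slot wraps to 0; if that equals get_off, or the TxD is still in
  * use (Host_Control set), the FIFO is full and the queue stops. */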
863c11a9
AR
3846 if (txdp->Host_Control ||
3847 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
776bd20f 3848 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
1da177e4
LT
3849 netif_stop_queue(dev);
3850 dev_kfree_skb(skb);
3851 spin_unlock_irqrestore(&sp->tx_lock, flags);
3852 return 0;
3853 }
0b1f7ebe
K
3854
3855 /* A buffer with no data will be dropped */
3856 if (!skb->len) {
3857 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3858 dev_kfree_skb(skb);
3859 spin_unlock_irqrestore(&sp->tx_lock, flags);
3860 return 0;
3861 }
3862
75c30b13 3863 offload_type = s2io_offload_type(skb);
75c30b13 3864 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1da177e4 3865 txdp->Control_1 |= TXD_TCP_LSO_EN;
75c30b13 3866 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
1da177e4 3867 }
84fa7933 3868 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4
LT
3869 txdp->Control_2 |=
3870 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3871 TXD_TX_CKO_UDP_EN);
3872 }
fed5eccd
AR
3873 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3874 txdp->Control_1 |= TXD_LIST_OWN_XENA;
1da177e4 3875 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3876
be3a6b02
K
3877 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3878 txdp->Control_2 |= TXD_VLAN_ENABLE;
3879 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3880 }
3881
fed5eccd 3882 frg_len = skb->len - skb->data_len;
75c30b13 3883 if (offload_type == SKB_GSO_UDP) {
fed5eccd
AR
3884 int ufo_size;
3885
75c30b13 3886 ufo_size = s2io_udp_mss(skb);
fed5eccd
AR
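 /* Round the UFO MSS down to a multiple of 8 bytes. */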
3887 ufo_size &= ~7;
3888 txdp->Control_1 |= TXD_UFO_EN;
3889 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3890 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3891#ifdef __BIG_ENDIAN
3892 sp->ufo_in_band_v[put_off] =
3893 (u64)skb_shinfo(skb)->ip6_frag_id;
3894#else
3895 sp->ufo_in_band_v[put_off] =
3896 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3897#endif
3898 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3899 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3900 sp->ufo_in_band_v,
3901 sizeof(u64), PCI_DMA_TODEVICE);
3902 txdp++;
fed5eccd 3903 }
1da177e4 3904
fed5eccd
AR
3905 txdp->Buffer_Pointer = pci_map_single
3906 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3907 txdp->Host_Control = (unsigned long) skb;
3908 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
75c30b13 3909 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
3910 txdp->Control_1 |= TXD_UFO_EN;
3911
3912 frg_cnt = skb_shinfo(skb)->nr_frags;
1da177e4
LT
3913 /* For fragmented SKB. */
3914 for (i = 0; i < frg_cnt; i++) {
3915 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe
K
3916 /* A '0' length fragment will be ignored */
3917 if (!frag->size)
3918 continue;
1da177e4
LT
3919 txdp++;
3920 txdp->Buffer_Pointer = (u64) pci_map_page
3921 (sp->pdev, frag->page, frag->page_offset,
3922 frag->size, PCI_DMA_TODEVICE);
efd51b5c 3923 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
75c30b13 3924 if (offload_type == SKB_GSO_UDP)
fed5eccd 3925 txdp->Control_1 |= TXD_UFO_EN;
1da177e4
LT
3926 }
3927 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3928
75c30b13 3929 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
3930 frg_cnt++; /* as Txd0 was used for inband header */
3931
1da177e4 3932 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3933 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
1da177e4
LT
3934 writeq(val64, &tx_fifo->TxDL_Pointer);
3935
3936 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3937 TX_FIFO_LAST_LIST);
75c30b13 3938 if (offload_type)
fed5eccd 3939 val64 |= TX_FIFO_SPECIAL_FUNC;
75c30b13 3940
1da177e4
LT
3941 writeq(val64, &tx_fifo->List_Control);
3942
303bcb4b
K
3943 mmiowb();
3944
1da177e4 3945 put_off++;
863c11a9
AR
3946 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3947 put_off = 0;
20346722 3948 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
1da177e4
LT
3949
3950 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 3951 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
bd1034f0 3952 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
1da177e4
LT
3953 DBG_PRINT(TX_DBG,
3954 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3955 put_off, get_off);
3956 netif_stop_queue(dev);
3957 }
3958
3959 dev->trans_start = jiffies;
3960 spin_unlock_irqrestore(&sp->tx_lock, flags);
3961
3962 return 0;
3963}
3964
25fff88e
K
3965static void
3966s2io_alarm_handle(unsigned long data)
3967{
1ee6dd77 3968 struct s2io_nic *sp = (struct s2io_nic *)data;
25fff88e
K
3969
3970 alarm_intr_handler(sp);
3971 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3972}
3973
1ee6dd77 3974static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
75c30b13
AR
3975{
3976 int rxb_size, level;
3977
3978 if (!sp->lro) {
3979 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3980 level = rx_buffer_level(sp, rxb_size, rng_n);
3981
3982 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3983 int ret;
3984 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3985 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3986 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3987 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3988 __FUNCTION__);
3989 clear_bit(0, (&sp->tasklet_status));
3990 return -1;
3991 }
3992 clear_bit(0, (&sp->tasklet_status));
3993 } else if (level == LOW)
3994 tasklet_schedule(&sp->task);
3995
3996 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
3997 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
3998 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
3999 }
4000 return 0;
4001}
4002
7d12e780 4003static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
cc6e7c44
RA
4004{
4005 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77 4006 struct s2io_nic *sp = dev->priv;
cc6e7c44 4007 int i;
1ee6dd77 4008 struct mac_info *mac_control;
cc6e7c44
RA
4009 struct config_param *config;
4010
4011 atomic_inc(&sp->isr_cnt);
4012 mac_control = &sp->mac_control;
4013 config = &sp->config;
4014 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4015
4016 /* If Intr is because of Rx Traffic */
4017 for (i = 0; i < config->rx_ring_num; i++)
4018 rx_intr_handler(&mac_control->rings[i]);
4019
4020 /* If Intr is because of Tx Traffic */
4021 for (i = 0; i < config->tx_fifo_num; i++)
4022 tx_intr_handler(&mac_control->fifos[i]);
4023
4024 /*
4025 * If the Rx buffer count is below the panic threshold then
4026 * reallocate the buffers from the interrupt handler itself,
4027 * else schedule a tasklet to reallocate the buffers.
4028 */
75c30b13
AR
4029 for (i = 0; i < config->rx_ring_num; i++)
4030 s2io_chk_rx_buffers(sp, i);
cc6e7c44
RA
4031
4032 atomic_dec(&sp->isr_cnt);
4033 return IRQ_HANDLED;
4034}
4035
7d12e780 4036static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
cc6e7c44 4037{
1ee6dd77
RB
4038 struct ring_info *ring = (struct ring_info *)dev_id;
4039 struct s2io_nic *sp = ring->nic;
cc6e7c44
RA
4040
4041 atomic_inc(&sp->isr_cnt);
cc6e7c44 4042
75c30b13
AR
4043 rx_intr_handler(ring);
4044 s2io_chk_rx_buffers(sp, ring->ring_no);
7d3d0439 4045
cc6e7c44 4046 atomic_dec(&sp->isr_cnt);
cc6e7c44
RA
4047 return IRQ_HANDLED;
4048}
4049
7d12e780 4050static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
cc6e7c44 4051{
1ee6dd77
RB
4052 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4053 struct s2io_nic *sp = fifo->nic;
cc6e7c44
RA
4054
4055 atomic_inc(&sp->isr_cnt);
4056 tx_intr_handler(fifo);
4057 atomic_dec(&sp->isr_cnt);
4058 return IRQ_HANDLED;
4059}
1ee6dd77 4060static void s2io_txpic_intr_handle(struct s2io_nic *sp)
a371a07d 4061{
1ee6dd77 4062 struct XENA_dev_config __iomem *bar0 = sp->bar0;
a371a07d
K
4063 u64 val64;
4064
4065 val64 = readq(&bar0->pic_int_status);
4066 if (val64 & PIC_INT_GPIO) {
4067 val64 = readq(&bar0->gpio_int_reg);
4068 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4069 (val64 & GPIO_INT_REG_LINK_UP)) {
c92ca04b
AR
4070 /*
 4071 * This is an unstable state, so clear both up/down
 4072 * interrupts and let the adapter re-evaluate the link state.
4073 */
a371a07d
K
4074 val64 |= GPIO_INT_REG_LINK_DOWN;
4075 val64 |= GPIO_INT_REG_LINK_UP;
4076 writeq(val64, &bar0->gpio_int_reg);
a371a07d 4077 val64 = readq(&bar0->gpio_int_mask);
c92ca04b
AR
4078 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4079 GPIO_INT_MASK_LINK_DOWN);
a371a07d 4080 writeq(val64, &bar0->gpio_int_mask);
a371a07d 4081 }
c92ca04b
AR
4082 else if (val64 & GPIO_INT_REG_LINK_UP) {
4083 val64 = readq(&bar0->adapter_status);
c92ca04b 4084 /* Enable Adapter */
19a60522
SS
4085 val64 = readq(&bar0->adapter_control);
4086 val64 |= ADAPTER_CNTL_EN;
4087 writeq(val64, &bar0->adapter_control);
4088 val64 |= ADAPTER_LED_ON;
4089 writeq(val64, &bar0->adapter_control);
4090 if (!sp->device_enabled_once)
4091 sp->device_enabled_once = 1;
c92ca04b 4092
19a60522
SS
4093 s2io_link(sp, LINK_UP);
4094 /*
4095 * unmask link down interrupt and mask link-up
4096 * intr
4097 */
4098 val64 = readq(&bar0->gpio_int_mask);
4099 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4100 val64 |= GPIO_INT_MASK_LINK_UP;
4101 writeq(val64, &bar0->gpio_int_mask);
c92ca04b 4102
c92ca04b
AR
4103 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4104 val64 = readq(&bar0->adapter_status);
19a60522
SS
4105 s2io_link(sp, LINK_DOWN);
 4106 /* Link is down so unmask link up interrupt */
4107 val64 = readq(&bar0->gpio_int_mask);
4108 val64 &= ~GPIO_INT_MASK_LINK_UP;
4109 val64 |= GPIO_INT_MASK_LINK_DOWN;
4110 writeq(val64, &bar0->gpio_int_mask);
a371a07d
K
4111 }
4112 }
c92ca04b 4113 val64 = readq(&bar0->gpio_int_mask);
a371a07d
K
4114}
4115
1da177e4
LT
4116/**
 4117 * s2io_isr - ISR handler of the device.
4118 * @irq: the irq of the device.
4119 * @dev_id: a void pointer to the dev structure of the NIC.
20346722
K
4120 * Description: This function is the ISR handler of the device. It
4121 * identifies the reason for the interrupt and calls the relevant
 4122 * service routines. As a contingency measure, this ISR allocates the
1da177e4
LT
4123 * recv buffers, if their numbers are below the panic value which is
4124 * presently set to 25% of the original number of rcv buffers allocated.
4125 * Return value:
20346722 4126 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
4127 * IRQ_NONE: will be returned if interrupt is not from our device
4128 */
7d12e780 4129static irqreturn_t s2io_isr(int irq, void *dev_id)
1da177e4
LT
4130{
4131 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77
RB
4132 struct s2io_nic *sp = dev->priv;
4133 struct XENA_dev_config __iomem *bar0 = sp->bar0;
20346722 4134 int i;
19a60522 4135 u64 reason = 0;
1ee6dd77 4136 struct mac_info *mac_control;
1da177e4
LT
4137 struct config_param *config;
4138
7ba013ac 4139 atomic_inc(&sp->isr_cnt);
1da177e4
LT
4140 mac_control = &sp->mac_control;
4141 config = &sp->config;
4142
20346722 4143 /*
1da177e4
LT
4144 * Identify the cause for interrupt and call the appropriate
 4145 * interrupt handler. Causes for the interrupt could be:
4146 * 1. Rx of packet.
4147 * 2. Tx complete.
4148 * 3. Link down.
20346722 4149 * 4. Error in any functional blocks of the NIC.
1da177e4
LT
4150 */
4151 reason = readq(&bar0->general_int_status);
4152
4153 if (!reason) {
19a60522
SS
4154 /* The interrupt was not raised by us. */
4155 atomic_dec(&sp->isr_cnt);
4156 return IRQ_NONE;
4157 }
4158 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4159 /* Disable device and get out */
7ba013ac 4160 atomic_dec(&sp->isr_cnt);
1da177e4
LT
4161 return IRQ_NONE;
4162 }
5d3213cc 4163
db874e65
SS
4164 if (napi) {
4165 if (reason & GEN_INTR_RXTRAFFIC) {
19a60522 4166 if ( likely ( netif_rx_schedule_prep(dev)) ) {
db874e65 4167 __netif_rx_schedule(dev);
19a60522 4168 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
db874e65 4169 }
19a60522
SS
4170 else
4171 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
db874e65
SS
4172 }
4173 } else {
4174 /*
4175 * Rx handler is called by default, without checking for the
4176 * cause of interrupt.
4177 * rx_traffic_int reg is an R1 register, writing all 1's
 4178 * will ensure that the actual interrupt causing bit gets
4179 * cleared and hence a read can be avoided.
4180 */
19a60522
SS
4181 if (reason & GEN_INTR_RXTRAFFIC)
4182 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4183
db874e65
SS
4184 for (i = 0; i < config->rx_ring_num; i++) {
4185 rx_intr_handler(&mac_control->rings[i]);
1da177e4
LT
4186 }
4187 }
1da177e4 4188
863c11a9
AR
4189 /*
4190 * tx_traffic_int reg is an R1 register, writing all 1's
 4191 * will ensure that the actual interrupt causing bit gets
4192 * cleared and hence a read can be avoided.
4193 */
19a60522
SS
4194 if (reason & GEN_INTR_TXTRAFFIC)
4195 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
fe113638 4196
863c11a9
AR
4197 for (i = 0; i < config->tx_fifo_num; i++)
4198 tx_intr_handler(&mac_control->fifos[i]);
20346722 4199
a371a07d
K
4200 if (reason & GEN_INTR_TXPIC)
4201 s2io_txpic_intr_handle(sp);
20346722
K
4202 /*
4203 * If the Rx buffer count is below the panic threshold then
4204 * reallocate the buffers from the interrupt handler itself,
1da177e4
LT
4205 * else schedule a tasklet to reallocate the buffers.
4206 */
db874e65
SS
4207 if (!napi) {
4208 for (i = 0; i < config->rx_ring_num; i++)
4209 s2io_chk_rx_buffers(sp, i);
4210 }
4211
4212 writeq(0, &bar0->general_int_mask);
4213 readl(&bar0->general_int_status);
4214
7ba013ac 4215 atomic_dec(&sp->isr_cnt);
1da177e4
LT
4216 return IRQ_HANDLED;
4217}
4218
7ba013ac
K
4219/**
 4220 * s2io_updt_stats - triggers a one-shot update of the NIC statistics block
4221 */
1ee6dd77 4222static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4223{
1ee6dd77 4224 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4225 u64 val64;
4226 int cnt = 0;
4227
4228 if (atomic_read(&sp->card_state) == CARD_UP) {
 4229 /* Approx 30us on a 133 MHz bus */
4230 val64 = SET_UPDT_CLICKS(10) |
4231 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4232 writeq(val64, &bar0->stat_cfg);
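 /* Poll until the one-shot update completes; BIT(0) of stat_cfg
  * presumably reads as busy until the update has finished. */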
4233 do {
4234 udelay(100);
4235 val64 = readq(&bar0->stat_cfg);
4236 if (!(val64 & BIT(0)))
4237 break;
4238 cnt++;
4239 if (cnt == 5)
4240 break; /* Updt failed */
4241 } while(1);
75c30b13 4242 } else {
1ee6dd77 4243 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
7ba013ac
K
4244 }
4245}
4246
1da177e4 4247/**
20346722 4248 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4249 * @dev : pointer to the device structure.
4250 * Description:
20346722 4251 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4252 * structure and returns a pointer to the same.
4253 * Return value:
4254 * pointer to the updated net_device_stats structure.
4255 */
4256
ac1f60db 4257static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4258{
1ee6dd77
RB
4259 struct s2io_nic *sp = dev->priv;
4260 struct mac_info *mac_control;
1da177e4
LT
4261 struct config_param *config;
4262
20346722 4263
1da177e4
LT
4264 mac_control = &sp->mac_control;
4265 config = &sp->config;
4266
7ba013ac
K
4267 /* Configure Stats for immediate updt */
4268 s2io_updt_stats(sp);
4269
4270 sp->stats.tx_packets =
4271 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722
K
4272 sp->stats.tx_errors =
4273 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4274 sp->stats.rx_errors =
ee705dba 4275 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
20346722
K
4276 sp->stats.multicast =
4277 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 4278 sp->stats.rx_length_errors =
ee705dba 4279 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
4280
4281 return (&sp->stats);
4282}
4283
4284/**
4285 * s2io_set_multicast - entry point for multicast address enable/disable.
4286 * @dev : pointer to the device structure
4287 * Description:
20346722
K
4288 * This function is a driver entry point which gets called by the kernel
4289 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
 4290 * called to set/reset promiscuous mode. Depending on the device flag, we
 4291 * determine if multicast addresses must be enabled or if promiscuous mode
4292 * is to be disabled etc.
4293 * Return value:
4294 * void.
4295 */
4296
4297static void s2io_set_multicast(struct net_device *dev)
4298{
4299 int i, j, prev_cnt;
4300 struct dev_mc_list *mclist;
1ee6dd77
RB
4301 struct s2io_nic *sp = dev->priv;
4302 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4303 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4304 0xfeffffffffffULL;
4305 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4306 void __iomem *add;
4307
4308 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4309 /* Enable all Multicast addresses */
4310 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4311 &bar0->rmac_addr_data0_mem);
4312 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4313 &bar0->rmac_addr_data1_mem);
4314 val64 = RMAC_ADDR_CMD_MEM_WE |
4315 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4316 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4317 writeq(val64, &bar0->rmac_addr_cmd_mem);
4318 /* Wait till command completes */
c92ca04b 4319 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4320 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4321 S2IO_BIT_RESET);
1da177e4
LT
4322
4323 sp->m_cast_flg = 1;
4324 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4325 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4326 /* Disable all Multicast addresses */
4327 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4328 &bar0->rmac_addr_data0_mem);
5e25b9dd
K
4329 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4330 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4331 val64 = RMAC_ADDR_CMD_MEM_WE |
4332 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4333 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4334 writeq(val64, &bar0->rmac_addr_cmd_mem);
4335 /* Wait till command completes */
c92ca04b 4336 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4337 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4338 S2IO_BIT_RESET);
1da177e4
LT
4339
4340 sp->m_cast_flg = 0;
4341 sp->all_multi_pos = 0;
4342 }
4343
4344 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4345 /* Put the NIC into promiscuous mode */
4346 add = &bar0->mac_cfg;
4347 val64 = readq(&bar0->mac_cfg);
4348 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4349
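 /* mac_cfg appears to be a key-protected register: each 32-bit
  * half-write below is preceded by writing the key 0x4C0D to
  * rmac_cfg_key. */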
4350 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4351 writel((u32) val64, add);
4352 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4353 writel((u32) (val64 >> 32), (add + 4));
4354
4355 val64 = readq(&bar0->mac_cfg);
4356 sp->promisc_flg = 1;
776bd20f 4357 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
1da177e4
LT
4358 dev->name);
4359 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4360 /* Remove the NIC from promiscuous mode */
4361 add = &bar0->mac_cfg;
4362 val64 = readq(&bar0->mac_cfg);
4363 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4364
4365 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4366 writel((u32) val64, add);
4367 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4368 writel((u32) (val64 >> 32), (add + 4));
4369
4370 val64 = readq(&bar0->mac_cfg);
4371 sp->promisc_flg = 0;
776bd20f 4372 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
1da177e4
LT
4373 dev->name);
4374 }
4375
4376 /* Update individual M_CAST address list */
4377 if ((!sp->m_cast_flg) && dev->mc_count) {
4378 if (dev->mc_count >
4379 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4380 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4381 dev->name);
4382 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4383 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4384 return;
4385 }
4386
4387 prev_cnt = sp->mc_addr_count;
4388 sp->mc_addr_count = dev->mc_count;
4389
4390 /* Clear out the previous list of Mc in the H/W. */
4391 for (i = 0; i < prev_cnt; i++) {
4392 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4393 &bar0->rmac_addr_data0_mem);
4394 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 4395 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4396 val64 = RMAC_ADDR_CMD_MEM_WE |
4397 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4398 RMAC_ADDR_CMD_MEM_OFFSET
4399 (MAC_MC_ADDR_START_OFFSET + i);
4400 writeq(val64, &bar0->rmac_addr_cmd_mem);
4401
4402 /* Wait for command completes */
c92ca04b 4403 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4404 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4405 S2IO_BIT_RESET)) {
1da177e4
LT
4406 DBG_PRINT(ERR_DBG, "%s: Adding ",
4407 dev->name);
4408 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4409 return;
4410 }
4411 }
4412
4413 /* Create the new Rx filter list and update the same in H/W. */
4414 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4415 i++, mclist = mclist->next) {
4416 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4417 ETH_ALEN);
a7a80d5a 4418 mac_addr = 0;
1da177e4
LT
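 /* Pack the six address bytes into the low 48 bits, first byte
  * most significant; e.g. 00:11:22:33:44:55 ends up as
  * 0x001122334455ULL after the final right shift. */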
4419 for (j = 0; j < ETH_ALEN; j++) {
4420 mac_addr |= mclist->dmi_addr[j];
4421 mac_addr <<= 8;
4422 }
4423 mac_addr >>= 8;
4424 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4425 &bar0->rmac_addr_data0_mem);
4426 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 4427 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4428 val64 = RMAC_ADDR_CMD_MEM_WE |
4429 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4430 RMAC_ADDR_CMD_MEM_OFFSET
4431 (i + MAC_MC_ADDR_START_OFFSET);
4432 writeq(val64, &bar0->rmac_addr_cmd_mem);
4433
4434 /* Wait for command completes */
c92ca04b 4435 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4436 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4437 S2IO_BIT_RESET)) {
1da177e4
LT
4438 DBG_PRINT(ERR_DBG, "%s: Adding ",
4439 dev->name);
4440 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4441 return;
4442 }
4443 }
4444 }
4445}
4446
4447/**
20346722 4448 * s2io_set_mac_addr - Programs the Xframe mac address
1da177e4
LT
4449 * @dev : pointer to the device structure.
4450 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 4451 * Description : This procedure will program the Xframe to receive
1da177e4 4452 * frames with the new Mac Address
20346722 4453 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
4454 * as defined in errno.h file on failure.
4455 */
4456
26df54bf 4457static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
1da177e4 4458{
1ee6dd77
RB
4459 struct s2io_nic *sp = dev->priv;
4460 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4461 register u64 val64, mac_addr = 0;
4462 int i;
4463
20346722 4464 /*
1da177e4
LT
4465 * Set the new MAC address as the new unicast filter and reflect this
4466 * change on the device address registered with the OS. It will be
20346722 4467 * at offset 0.
1da177e4
LT
4468 */
4469 for (i = 0; i < ETH_ALEN; i++) {
4470 mac_addr <<= 8;
4471 mac_addr |= addr[i];
4472 }
4473
4474 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4475 &bar0->rmac_addr_data0_mem);
4476
4477 val64 =
4478 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4479 RMAC_ADDR_CMD_MEM_OFFSET(0);
4480 writeq(val64, &bar0->rmac_addr_cmd_mem);
4481 /* Wait till command completes */
c92ca04b 4482 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41 4483 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
1da177e4
LT
4484 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4485 return FAILURE;
4486 }
4487
4488 return SUCCESS;
4489}
4490
4491/**
20346722 4492 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
 4493 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4494 * @info: pointer to the structure with parameters given by ethtool to set
4495 * link information.
4496 * Description:
20346722 4497 * The function sets different link parameters provided by the user onto
1da177e4
LT
4498 * the NIC.
4499 * Return value:
4500 * 0 on success.
4501*/
4502
4503static int s2io_ethtool_sset(struct net_device *dev,
4504 struct ethtool_cmd *info)
4505{
1ee6dd77 4506 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4507 if ((info->autoneg == AUTONEG_ENABLE) ||
4508 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4509 return -EINVAL;
4510 else {
4511 s2io_close(sp->dev);
4512 s2io_open(sp->dev);
4513 }
4514
4515 return 0;
4516}
4517
4518/**
 4519 * s2io_ethtool_gset - Return link specific information.
1da177e4
LT
4520 * @sp : private member of the device structure, pointer to the
4521 * s2io_nic structure.
4522 * @info : pointer to the structure with parameters given by ethtool
4523 * to return link information.
4524 * Description:
4525 * Returns link specific information like speed, duplex etc.. to ethtool.
4526 * Return value :
4527 * return 0 on success.
4528 */
4529
4530static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4531{
1ee6dd77 4532 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4533 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4534 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4535 info->port = PORT_FIBRE;
4536 /* info->transceiver?? TODO */
4537
4538 if (netif_carrier_ok(sp->dev)) {
4539 info->speed = 10000;
4540 info->duplex = DUPLEX_FULL;
4541 } else {
4542 info->speed = -1;
4543 info->duplex = -1;
4544 }
4545
4546 info->autoneg = AUTONEG_DISABLE;
4547 return 0;
4548}
4549
4550/**
20346722
K
4551 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4552 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4553 * s2io_nic structure.
4554 * @info : pointer to the structure with parameters given by ethtool to
4555 * return driver information.
4556 * Description:
 4557 * Returns driver specific information like name, version etc. to ethtool.
4558 * Return value:
4559 * void
4560 */
4561
4562static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4563 struct ethtool_drvinfo *info)
4564{
1ee6dd77 4565 struct s2io_nic *sp = dev->priv;
1da177e4 4566
dbc2309d
JL
4567 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4568 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4569 strncpy(info->fw_version, "", sizeof(info->fw_version));
4570 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
4571 info->regdump_len = XENA_REG_SPACE;
4572 info->eedump_len = XENA_EEPROM_SPACE;
4573 info->testinfo_len = S2IO_TEST_LEN;
4574 info->n_stats = S2IO_STAT_LEN;
4575}
4576
4577/**
 4578 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
20346722 4579 * @sp: private member of the device structure, which is a pointer to the
1da177e4 4580 * s2io_nic structure.
20346722 4581 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
4582 * dumping the registers.
 4583 * @reg_space: The input argument into which all the registers are dumped.
4584 * Description:
4585 * Dumps the entire register space of xFrame NIC into the user given
4586 * buffer area.
4587 * Return value :
4588 * void .
4589*/
4590
4591static void s2io_ethtool_gregs(struct net_device *dev,
4592 struct ethtool_regs *regs, void *space)
4593{
4594 int i;
4595 u64 reg;
4596 u8 *reg_space = (u8 *) space;
1ee6dd77 4597 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4598
4599 regs->len = XENA_REG_SPACE;
4600 regs->version = sp->pdev->subsystem_device;
4601
4602 for (i = 0; i < regs->len; i += 8) {
4603 reg = readq(sp->bar0 + i);
4604 memcpy((reg_space + i), &reg, 8);
4605 }
4606}
4607
4608/**
4609 * s2io_phy_id - timer function that alternates adapter LED.
20346722 4610 * @data : address of the private member of the device structure, which
1da177e4 4611 * is a pointer to the s2io_nic structure, provided as an u32.
20346722
K
 4612 * Description: This is actually the timer function that toggles the
 4613 * adapter LED bit of the adapter control register on every invocation.
 4614 * The timer is set for 1/2 a second, hence the NIC blinks
1da177e4
LT
4615 * once every second.
4616*/
4617static void s2io_phy_id(unsigned long data)
4618{
1ee6dd77
RB
4619 struct s2io_nic *sp = (struct s2io_nic *) data;
4620 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4621 u64 val64 = 0;
4622 u16 subid;
4623
4624 subid = sp->pdev->subsystem_device;
541ae68f
K
4625 if ((sp->device_type == XFRAME_II_DEVICE) ||
4626 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
4627 val64 = readq(&bar0->gpio_control);
4628 val64 ^= GPIO_CTRL_GPIO_0;
4629 writeq(val64, &bar0->gpio_control);
4630 } else {
4631 val64 = readq(&bar0->adapter_control);
4632 val64 ^= ADAPTER_LED_ON;
4633 writeq(val64, &bar0->adapter_control);
4634 }
4635
4636 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4637}
4638
4639/**
4640 * s2io_ethtool_idnic - To physically identify the nic on the system.
4641 * @sp : private member of the device structure, which is a pointer to the
4642 * s2io_nic structure.
20346722 4643 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
4644 * ethtool.
4645 * Description: Used to physically identify the NIC on the system.
20346722 4646 * The Link LED will blink for a time specified by the user for
1da177e4 4647 * identification.
20346722 4648 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
 4649 * identification is possible only if its link is up.
4650 * Return value:
4651 * int , returns 0 on success
4652 */
4653
4654static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4655{
4656 u64 val64 = 0, last_gpio_ctrl_val;
1ee6dd77
RB
4657 struct s2io_nic *sp = dev->priv;
4658 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4659 u16 subid;
4660
4661 subid = sp->pdev->subsystem_device;
4662 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f
K
4663 if ((sp->device_type == XFRAME_I_DEVICE) &&
4664 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
4665 val64 = readq(&bar0->adapter_control);
4666 if (!(val64 & ADAPTER_CNTL_EN)) {
4667 printk(KERN_ERR
4668 "Adapter Link down, cannot blink LED\n");
4669 return -EFAULT;
4670 }
4671 }
4672 if (sp->id_timer.function == NULL) {
4673 init_timer(&sp->id_timer);
4674 sp->id_timer.function = s2io_phy_id;
4675 sp->id_timer.data = (unsigned long) sp;
4676 }
4677 mod_timer(&sp->id_timer, jiffies);
4678 if (data)
20346722 4679 msleep_interruptible(data * HZ);
1da177e4 4680 else
20346722 4681 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
4682 del_timer_sync(&sp->id_timer);
4683
541ae68f 4684 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
4685 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4686 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4687 }
4688
4689 return 0;
4690}
4691
4692/**
 4693 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722
K
4694 * @sp : private member of the device structure, which is a pointer to the
4695 * s2io_nic structure.
1da177e4
LT
4696 * @ep : pointer to the structure with pause parameters given by ethtool.
4697 * Description:
4698 * Returns the Pause frame generation and reception capability of the NIC.
4699 * Return value:
4700 * void
4701 */
4702static void s2io_ethtool_getpause_data(struct net_device *dev,
4703 struct ethtool_pauseparam *ep)
4704{
4705 u64 val64;
1ee6dd77
RB
4706 struct s2io_nic *sp = dev->priv;
4707 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4708
4709 val64 = readq(&bar0->rmac_pause_cfg);
4710 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4711 ep->tx_pause = TRUE;
4712 if (val64 & RMAC_PAUSE_RX_ENABLE)
4713 ep->rx_pause = TRUE;
4714 ep->autoneg = FALSE;
4715}
4716
4717/**
4718 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 4719 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4720 * s2io_nic structure.
4721 * @ep : pointer to the structure with pause parameters given by ethtool.
4722 * Description:
4723 * It can be used to set or reset Pause frame generation or reception
4724 * support of the NIC.
4725 * Return value:
4726 * int, returns 0 on Success
4727 */
4728
4729static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 4730 struct ethtool_pauseparam *ep)
1da177e4
LT
4731{
4732 u64 val64;
1ee6dd77
RB
4733 struct s2io_nic *sp = dev->priv;
4734 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4735
4736 val64 = readq(&bar0->rmac_pause_cfg);
4737 if (ep->tx_pause)
4738 val64 |= RMAC_PAUSE_GEN_ENABLE;
4739 else
4740 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4741 if (ep->rx_pause)
4742 val64 |= RMAC_PAUSE_RX_ENABLE;
4743 else
4744 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4745 writeq(val64, &bar0->rmac_pause_cfg);
4746 return 0;
4747}
4748
4749/**
4750 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 4751 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4752 * s2io_nic structure.
 4753 * @off : offset from which the data is to be read
 4754 * @data : It's an output parameter where the data read at the given
20346722 4755 * offset is stored.
1da177e4 4756 * Description:
20346722 4757 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
4758 * read data.
 4759 * NOTE: Will allow reading only the part of the EEPROM visible through the
4760 * I2C bus.
4761 * Return value:
4762 * -1 on failure and 0 on success.
4763 */
4764
4765#define S2IO_DEV_ID 5
1ee6dd77 4766static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
1da177e4
LT
4767{
4768 int ret = -1;
4769 u32 exit_cnt = 0;
4770 u64 val64;
1ee6dd77 4771 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 4772
ad4ebed0 4773 if (sp->device_type == XFRAME_I_DEVICE) {
4774 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4775 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4776 I2C_CONTROL_CNTL_START;
4777 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
1da177e4 4778
ad4ebed0 4779 while (exit_cnt < 5) {
4780 val64 = readq(&bar0->i2c_control);
4781 if (I2C_CONTROL_CNTL_END(val64)) {
4782 *data = I2C_CONTROL_GET_DATA(val64);
4783 ret = 0;
4784 break;
4785 }
4786 msleep(50);
4787 exit_cnt++;
1da177e4 4788 }
1da177e4
LT
4789 }
4790
ad4ebed0 4791 if (sp->device_type == XFRAME_II_DEVICE) {
4792 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 4793 SPI_CONTROL_BYTECNT(0x3) |
ad4ebed0 4794 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4795 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4796 val64 |= SPI_CONTROL_REQ;
4797 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4798 while (exit_cnt < 5) {
4799 val64 = readq(&bar0->spi_control);
4800 if (val64 & SPI_CONTROL_NACK) {
4801 ret = 1;
4802 break;
4803 } else if (val64 & SPI_CONTROL_DONE) {
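 /* Only 3 bytes were requested via SPI_CONTROL_BYTECNT(0x3),
  * so keep just the low 24 bits of the data register. */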
4804 *data = readq(&bar0->spi_data);
4805 *data &= 0xffffff;
4806 ret = 0;
4807 break;
4808 }
4809 msleep(50);
4810 exit_cnt++;
4811 }
4812 }
1da177e4
LT
4813 return ret;
4814}
4815
4816/**
4817 * write_eeprom - actually writes the relevant part of the data value.
4818 * @sp : private member of the device structure, which is a pointer to the
4819 * s2io_nic structure.
4820 * @off : offset at which the data must be written
4821 * @data : The data that is to be written
20346722 4822 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
4823 * the Eeprom. (max of 3)
4824 * Description:
4825 * Actually writes the relevant part of the data value into the Eeprom
4826 * through the I2C bus.
4827 * Return value:
4828 * 0 on success, -1 on failure.
4829 */
4830
1ee6dd77 4831static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
1da177e4
LT
4832{
4833 int exit_cnt = 0, ret = -1;
4834 u64 val64;
1ee6dd77 4835 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 4836
ad4ebed0 4837 if (sp->device_type == XFRAME_I_DEVICE) {
4838 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4839 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4840 I2C_CONTROL_CNTL_START;
4841 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4842
4843 while (exit_cnt < 5) {
4844 val64 = readq(&bar0->i2c_control);
4845 if (I2C_CONTROL_CNTL_END(val64)) {
4846 if (!(val64 & I2C_CONTROL_NACK))
4847 ret = 0;
4848 break;
4849 }
4850 msleep(50);
4851 exit_cnt++;
4852 }
4853 }
1da177e4 4854
ad4ebed0 4855 if (sp->device_type == XFRAME_II_DEVICE) {
4856 int write_cnt = (cnt == 8) ? 0 : cnt;
4857 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4858
4859 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 4860 SPI_CONTROL_BYTECNT(write_cnt) |
ad4ebed0 4861 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4862 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4863 val64 |= SPI_CONTROL_REQ;
4864 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4865 while (exit_cnt < 5) {
4866 val64 = readq(&bar0->spi_control);
4867 if (val64 & SPI_CONTROL_NACK) {
4868 ret = 1;
4869 break;
4870 } else if (val64 & SPI_CONTROL_DONE) {
1da177e4 4871 ret = 0;
ad4ebed0 4872 break;
4873 }
4874 msleep(50);
4875 exit_cnt++;
1da177e4 4876 }
1da177e4 4877 }
1da177e4
LT
4878 return ret;
4879}
1ee6dd77 4880static void s2io_vpd_read(struct s2io_nic *nic)
9dc737a7 4881{
b41477f3
AR
4882 u8 *vpd_data;
4883 u8 data;
9dc737a7
AR
4884 int i=0, cnt, fail = 0;
4885 int vpd_addr = 0x80;
4886
4887 if (nic->device_type == XFRAME_II_DEVICE) {
4888 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4889 vpd_addr = 0x80;
4890 }
4891 else {
4892 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4893 vpd_addr = 0x50;
4894 }
19a60522 4895 strcpy(nic->serial_num, "NOT AVAILABLE");
9dc737a7 4896
b41477f3
AR
4897 vpd_data = kmalloc(256, GFP_KERNEL);
4898 if (!vpd_data)
4899 return;
4900
9dc737a7
AR
4901 for (i = 0; i < 256; i +=4 ) {
4902 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4903 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
4904 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
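 /* Standard PCI VPD handshake: start a read with the flag bit
  * clear, then poll until the device sets bit 7 (0x80) of the
  * upper address byte to indicate the data is ready. */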
4905 for (cnt = 0; cnt <5; cnt++) {
4906 msleep(2);
4907 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4908 if (data == 0x80)
4909 break;
4910 }
4911 if (cnt >= 5) {
4912 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4913 fail = 1;
4914 break;
4915 }
4916 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4917 (u32 *)&vpd_data[i]);
4918 }
19a60522
SS
4919
4920 if(!fail) {
4921 /* read serial number of adapter */
4922 for (cnt = 0; cnt < 256; cnt++) {
4923 if ((vpd_data[cnt] == 'S') &&
4924 (vpd_data[cnt+1] == 'N') &&
4925 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4926 memset(nic->serial_num, 0, VPD_STRING_LEN);
4927 memcpy(nic->serial_num, &vpd_data[cnt + 3],
4928 vpd_data[cnt+2]);
4929 break;
4930 }
4931 }
4932 }
4933
4934 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
9dc737a7
AR
4935 memset(nic->product_name, 0, vpd_data[1]);
4936 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4937 }
b41477f3 4938 kfree(vpd_data);
9dc737a7
AR
4939}
4940
1da177e4
LT
4941/**
4942 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 4943 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
20346722 4944 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4945 * containing all relevant information.
4946 * @data_buf : user defined value to be written into Eeprom.
4947 * Description: Reads the values stored in the Eeprom at given offset
 4948 * for a given length. Stores these values into the input argument data
 4949 * buffer 'data_buf' and returns these to the caller (ethtool).
4950 * Return value:
4951 * int 0 on success
4952 */
4953
4954static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 4955 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 4956{
ad4ebed0 4957 u32 i, valid;
4958 u64 data;
1ee6dd77 4959 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4960
4961 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4962
4963 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4964 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4965
4966 for (i = 0; i < eeprom->len; i += 4) {
4967 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4968 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4969 return -EFAULT;
4970 }
4971 valid = INV(data);
4972 memcpy((data_buf + i), &valid, 4);
4973 }
4974 return 0;
4975}
4976
4977/**
4978 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4979 * @sp : private member of the device structure, which is a pointer to the
4980 * s2io_nic structure.
20346722 4981 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
4982 * containing all relevant information.
 4983 * @data_buf : user defined value to be written into Eeprom.
4984 * Description:
4985 * Tries to write the user provided value in the Eeprom, at the offset
4986 * given by the user.
4987 * Return value:
4988 * 0 on success, -EFAULT on failure.
4989 */
4990
4991static int s2io_ethtool_seeprom(struct net_device *dev,
4992 struct ethtool_eeprom *eeprom,
4993 u8 * data_buf)
4994{
4995 int len = eeprom->len, cnt = 0;
ad4ebed0 4996 u64 valid = 0, data;
1ee6dd77 4997 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4998
4999 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5000 DBG_PRINT(ERR_DBG,
5001 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5002 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5003 eeprom->magic);
5004 return -EFAULT;
5005 }
5006
5007 while (len) {
5008 data = (u32) data_buf[cnt] & 0x000000FF;
5009 if (data) {
5010 valid = (u32) (data << 24);
5011 } else
5012 valid = data;
5013
5014 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5015 DBG_PRINT(ERR_DBG,
5016 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5017 DBG_PRINT(ERR_DBG,
5018 "write into the specified offset\n");
5019 return -EFAULT;
5020 }
5021 cnt++;
5022 len--;
5023 }
5024
5025 return 0;
5026}
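/*
 * Both handlers above back the standard ethtool EEPROM interface.  Typical
 * user-space usage (interface name illustrative):
 *
 *	ethtool -e eth0 offset 0 length 64
 *	ethtool -E eth0 magic <vendor|device<<16> offset 0x4F0 value 0x12
 *
 * The magic word required for writes is built exactly as in
 * s2io_ethtool_geeprom(): PCI vendor ID in the low 16 bits, device ID in
 * the high 16 bits.
 */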
5027
5028/**
20346722
K
5029 * s2io_register_test - reads and writes into all clock domains.
5030 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5031 * s2io_nic structure.
 5032 * @data : variable that returns the result of each of the tests conducted
 5033 * by the driver.
5034 * Description:
5035 * Read and write into all clock domains. The NIC has 3 clock domains,
 5036 * and verifies that registers in all three regions are accessible.
5037 * Return value:
5038 * 0 on success.
5039 */
5040
1ee6dd77 5041static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5042{
1ee6dd77 5043 struct XENA_dev_config __iomem *bar0 = sp->bar0;
ad4ebed0 5044 u64 val64 = 0, exp_val;
1da177e4
LT
5045 int fail = 0;
5046
20346722
K
5047 val64 = readq(&bar0->pif_rd_swapper_fb);
5048 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
5049 fail = 1;
5050 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5051 }
5052
5053 val64 = readq(&bar0->rmac_pause_cfg);
5054 if (val64 != 0xc000ffff00000000ULL) {
5055 fail = 1;
5056 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5057 }
5058
5059 val64 = readq(&bar0->rx_queue_cfg);
ad4ebed0 5060 if (sp->device_type == XFRAME_II_DEVICE)
5061 exp_val = 0x0404040404040404ULL;
5062 else
5063 exp_val = 0x0808080808080808ULL;
5064 if (val64 != exp_val) {
1da177e4
LT
5065 fail = 1;
5066 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5067 }
5068
5069 val64 = readq(&bar0->xgxs_efifo_cfg);
5070 if (val64 != 0x000000001923141EULL) {
5071 fail = 1;
5072 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5073 }
5074
5075 val64 = 0x5A5A5A5A5A5A5A5AULL;
5076 writeq(val64, &bar0->xmsi_data);
5077 val64 = readq(&bar0->xmsi_data);
5078 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5079 fail = 1;
5080 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5081 }
5082
5083 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5084 writeq(val64, &bar0->xmsi_data);
5085 val64 = readq(&bar0->xmsi_data);
5086 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5087 fail = 1;
5088 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5089 }
5090
5091 *data = fail;
ad4ebed0 5092 return fail;
1da177e4
LT
5093}
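/*
 * The expected constants above are the adapter's fixed power-on values for
 * these registers (taken from the comparison logic here, not re-verified
 * against the Xena spec).  A table-driven sketch of the same read test --
 * struct reg_test and run_read_tests() are illustrative only:
 */
struct reg_test {
	unsigned long offset;	/* register offset within BAR0 */
	u64 expect;		/* expected power-on value */
};

static int run_read_tests(void __iomem *bar0, const struct reg_test *tests,
			  int n)
{
	int i, fail = 0;

	for (i = 0; i < n; i++)
		if (readq(bar0 + tests[i].offset) != tests[i].expect)
			fail = 1;	/* keep scanning, report at the end */
	return fail;
}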
5094
5095/**
20346722 5096 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
5097 * @sp : private member of the device structure, which is a pointer to the
5098 * s2io_nic structure.
5099 * @data:variable that returns the result of each of the test conducted by
5100 * the driver.
5101 * Description:
20346722 5102 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
5103 * register.
5104 * Return value:
5105 * 0 on success.
5106 */
5107
1ee6dd77 5108static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5109{
5110 int fail = 0;
ad4ebed0 5111 u64 ret_data, org_4F0, org_7F0;
5112 u8 saved_4F0 = 0, saved_7F0 = 0;
5113 struct net_device *dev = sp->dev;
1da177e4
LT
5114
5115 /* Test Write Error at offset 0 */
ad4ebed0 5116 /* Note that SPI interface allows write access to all areas
5117 * of EEPROM. Hence doing all negative testing only for Xframe I.
5118 */
5119 if (sp->device_type == XFRAME_I_DEVICE)
5120 if (!write_eeprom(sp, 0, 0, 3))
5121 fail = 1;
5122
5123 /* Save current values at offsets 0x4F0 and 0x7F0 */
5124 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5125 saved_4F0 = 1;
5126 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5127 saved_7F0 = 1;
1da177e4
LT
5128
5129 /* Test Write at offset 4f0 */
ad4ebed0 5130 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
1da177e4
LT
5131 fail = 1;
5132 if (read_eeprom(sp, 0x4F0, &ret_data))
5133 fail = 1;
5134
ad4ebed0 5135 if (ret_data != 0x012345) {
26b7625c
AM
5136 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5137 "Data written %llx Data read %llx\n",
5138 dev->name, (unsigned long long)0x12345,
5139 (unsigned long long)ret_data);
1da177e4 5140 fail = 1;
ad4ebed0 5141 }
1da177e4
LT
5142
 5143 /* Reset the EEPROM data to FFFF */
ad4ebed0 5144 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
1da177e4
LT
5145
5146 /* Test Write Request Error at offset 0x7c */
ad4ebed0 5147 if (sp->device_type == XFRAME_I_DEVICE)
5148 if (!write_eeprom(sp, 0x07C, 0, 3))
5149 fail = 1;
1da177e4 5150
ad4ebed0 5151 /* Test Write Request at offset 0x7f0 */
5152 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
1da177e4 5153 fail = 1;
ad4ebed0 5154 if (read_eeprom(sp, 0x7F0, &ret_data))
1da177e4
LT
5155 fail = 1;
5156
ad4ebed0 5157 if (ret_data != 0x012345) {
26b7625c
AM
5158 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5159 "Data written %llx Data read %llx\n",
5160 dev->name, (unsigned long long)0x12345,
5161 (unsigned long long)ret_data);
1da177e4 5162 fail = 1;
ad4ebed0 5163 }
1da177e4
LT
5164
 5165 /* Reset the EEPROM data to FFFF */
ad4ebed0 5166 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
1da177e4 5167
ad4ebed0 5168 if (sp->device_type == XFRAME_I_DEVICE) {
5169 /* Test Write Error at offset 0x80 */
5170 if (!write_eeprom(sp, 0x080, 0, 3))
5171 fail = 1;
1da177e4 5172
ad4ebed0 5173 /* Test Write Error at offset 0xfc */
5174 if (!write_eeprom(sp, 0x0FC, 0, 3))
5175 fail = 1;
1da177e4 5176
ad4ebed0 5177 /* Test Write Error at offset 0x100 */
5178 if (!write_eeprom(sp, 0x100, 0, 3))
5179 fail = 1;
1da177e4 5180
ad4ebed0 5181 /* Test Write Error at offset 4ec */
5182 if (!write_eeprom(sp, 0x4EC, 0, 3))
5183 fail = 1;
5184 }
5185
5186 /* Restore values at offsets 0x4F0 and 0x7F0 */
5187 if (saved_4F0)
5188 write_eeprom(sp, 0x4F0, org_4F0, 3);
5189 if (saved_7F0)
5190 write_eeprom(sp, 0x7F0, org_7F0, 3);
1da177e4
LT
5191
5192 *data = fail;
ad4ebed0 5193 return fail;
1da177e4
LT
5194}
5195
5196/**
5197 * s2io_bist_test - invokes the MemBist test of the card .
20346722 5198 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5199 * s2io_nic structure.
20346722 5200 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
5201 * the driver.
5202 * Description:
5203 * This invokes the MemBist test of the card. We give around
5204 * 2 secs time for the Test to complete. If it's still not complete
20346722 5205 * within this period, we consider that the test failed.
1da177e4
LT
5206 * Return value:
5207 * 0 on success and -1 on failure.
5208 */
5209
1ee6dd77 5210static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5211{
5212 u8 bist = 0;
5213 int cnt = 0, ret = -1;
5214
5215 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5216 bist |= PCI_BIST_START;
 5217 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5218
5219 while (cnt < 20) {
5220 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5221 if (!(bist & PCI_BIST_START)) {
5222 *data = (bist & PCI_BIST_CODE_MASK);
5223 ret = 0;
5224 break;
5225 }
5226 msleep(100);
5227 cnt++;
5228 }
5229
5230 return ret;
5231}
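/*
 * PCI BIST lives in a one-byte register in standard config space.  The PCI
 * spec defines BIST behaviour only when PCI_BIST_CAPABLE is set, so a
 * defensive caller might gate the test first -- a sketch, not driver code:
 */
static int bist_supported(struct pci_dev *pdev)
{
	u8 bist = 0;

	pci_read_config_byte(pdev, PCI_BIST, &bist);
	return (bist & PCI_BIST_CAPABLE) != 0;
}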
5232
5233/**
20346722
K
 5234 * s2io_link_test - verifies the link state of the nic
 5235 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5236 * s2io_nic structure.
5237 * @data: variable that returns the result of each of the test conducted by
5238 * the driver.
5239 * Description:
20346722 5240 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
5241 * argument 'data' appropriately.
5242 * Return value:
5243 * 0 on success.
5244 */
5245
1ee6dd77 5246static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5247{
1ee6dd77 5248 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5249 u64 val64;
5250
5251 val64 = readq(&bar0->adapter_status);
c92ca04b 5252 if(!(LINK_IS_UP(val64)))
1da177e4 5253 *data = 1;
c92ca04b
AR
5254 else
5255 *data = 0;
1da177e4 5256
b41477f3 5257 return *data;
1da177e4
LT
5258}
5259
5260/**
20346722
K
5261 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5262 * @sp - private member of the device structure, which is a pointer to the
1da177e4 5263 * s2io_nic structure.
20346722 5264 * @data - variable that returns the result of each of the test
1da177e4
LT
5265 * conducted by the driver.
5266 * Description:
20346722 5267 * This is one of the offline tests that verifies the read and write
1da177e4
LT
5268 * access to the RldRam chip on the NIC.
5269 * Return value:
5270 * 0 on success.
5271 */
5272
1ee6dd77 5273static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5274{
1ee6dd77 5275 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 5276 u64 val64;
ad4ebed0 5277 int cnt, iteration = 0, test_fail = 0;
1da177e4
LT
5278
5279 val64 = readq(&bar0->adapter_control);
5280 val64 &= ~ADAPTER_ECC_EN;
5281 writeq(val64, &bar0->adapter_control);
5282
5283 val64 = readq(&bar0->mc_rldram_test_ctrl);
5284 val64 |= MC_RLDRAM_TEST_MODE;
ad4ebed0 5285 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5286
5287 val64 = readq(&bar0->mc_rldram_mrs);
5288 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5289 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5290
5291 val64 |= MC_RLDRAM_MRS_ENABLE;
5292 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5293
5294 while (iteration < 2) {
5295 val64 = 0x55555555aaaa0000ULL;
5296 if (iteration == 1) {
5297 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5298 }
5299 writeq(val64, &bar0->mc_rldram_test_d0);
5300
5301 val64 = 0xaaaa5a5555550000ULL;
5302 if (iteration == 1) {
5303 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5304 }
5305 writeq(val64, &bar0->mc_rldram_test_d1);
5306
5307 val64 = 0x55aaaaaaaa5a0000ULL;
5308 if (iteration == 1) {
5309 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5310 }
5311 writeq(val64, &bar0->mc_rldram_test_d2);
5312
ad4ebed0 5313 val64 = (u64) (0x0000003ffffe0100ULL);
1da177e4
LT
5314 writeq(val64, &bar0->mc_rldram_test_add);
5315
ad4ebed0 5316 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5317 MC_RLDRAM_TEST_GO;
5318 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5319
5320 for (cnt = 0; cnt < 5; cnt++) {
5321 val64 = readq(&bar0->mc_rldram_test_ctrl);
5322 if (val64 & MC_RLDRAM_TEST_DONE)
5323 break;
5324 msleep(200);
5325 }
5326
5327 if (cnt == 5)
5328 break;
5329
ad4ebed0 5330 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5331 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5332
5333 for (cnt = 0; cnt < 5; cnt++) {
5334 val64 = readq(&bar0->mc_rldram_test_ctrl);
5335 if (val64 & MC_RLDRAM_TEST_DONE)
5336 break;
5337 msleep(500);
5338 }
5339
5340 if (cnt == 5)
5341 break;
5342
5343 val64 = readq(&bar0->mc_rldram_test_ctrl);
ad4ebed0 5344 if (!(val64 & MC_RLDRAM_TEST_PASS))
5345 test_fail = 1;
1da177e4
LT
5346
5347 iteration++;
5348 }
5349
ad4ebed0 5350 *data = test_fail;
1da177e4 5351
ad4ebed0 5352 /* Bring the adapter out of test mode */
5353 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5354
5355 return test_fail;
1da177e4
LT
5356}
5357
5358/**
 5359 * s2io_ethtool_test - conducts self-tests to determine the health of the card.
5360 * @sp : private member of the device structure, which is a pointer to the
5361 * s2io_nic structure.
5362 * @ethtest : pointer to a ethtool command specific structure that will be
5363 * returned to the user.
20346722 5364 * @data : variable that returns the result of each of the test
1da177e4
LT
5365 * conducted by the driver.
5366 * Description:
 5367 * This function conducts 5 tests (4 offline and 1 online) to determine
5368 * the health of the card.
5369 * Return value:
5370 * void
5371 */
5372
5373static void s2io_ethtool_test(struct net_device *dev,
5374 struct ethtool_test *ethtest,
5375 uint64_t * data)
5376{
1ee6dd77 5377 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5378 int orig_state = netif_running(sp->dev);
5379
5380 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5381 /* Offline Tests. */
20346722 5382 if (orig_state)
1da177e4 5383 s2io_close(sp->dev);
1da177e4
LT
5384
5385 if (s2io_register_test(sp, &data[0]))
5386 ethtest->flags |= ETH_TEST_FL_FAILED;
5387
5388 s2io_reset(sp);
1da177e4
LT
5389
5390 if (s2io_rldram_test(sp, &data[3]))
5391 ethtest->flags |= ETH_TEST_FL_FAILED;
5392
5393 s2io_reset(sp);
1da177e4
LT
5394
5395 if (s2io_eeprom_test(sp, &data[1]))
5396 ethtest->flags |= ETH_TEST_FL_FAILED;
5397
5398 if (s2io_bist_test(sp, &data[4]))
5399 ethtest->flags |= ETH_TEST_FL_FAILED;
5400
5401 if (orig_state)
5402 s2io_open(sp->dev);
5403
5404 data[2] = 0;
5405 } else {
5406 /* Online Tests. */
5407 if (!orig_state) {
5408 DBG_PRINT(ERR_DBG,
5409 "%s: is not up, cannot run test\n",
5410 dev->name);
5411 data[0] = -1;
5412 data[1] = -1;
5413 data[2] = -1;
5414 data[3] = -1;
5415 data[4] = -1;
5416 }
5417
5418 if (s2io_link_test(sp, &data[2]))
5419 ethtest->flags |= ETH_TEST_FL_FAILED;
5420
5421 data[0] = 0;
5422 data[1] = 0;
5423 data[3] = 0;
5424 data[4] = 0;
5425 }
5426}
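/*
 * These self-tests are driven through the standard ethtool test interface:
 * "ethtool -t eth0 offline" exercises the offline branch above, while
 * "ethtool -t eth0 online" runs only the link test.  The result slots are
 * data[0] register, data[1] eeprom, data[2] link, data[3] rldram and
 * data[4] bist, presumably matching the order of s2io_gstrings (defined
 * outside this hunk).
 */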
5427
5428static void s2io_get_ethtool_stats(struct net_device *dev,
5429 struct ethtool_stats *estats,
5430 u64 * tmp_stats)
5431{
5432 int i = 0;
1ee6dd77
RB
5433 struct s2io_nic *sp = dev->priv;
5434 struct stat_block *stat_info = sp->mac_control.stats_info;
1da177e4 5435
7ba013ac 5436 s2io_updt_stats(sp);
541ae68f
K
5437 tmp_stats[i++] =
5438 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5439 le32_to_cpu(stat_info->tmac_frms);
5440 tmp_stats[i++] =
5441 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5442 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 5443 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f
K
5444 tmp_stats[i++] =
5445 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5446 le32_to_cpu(stat_info->tmac_mcst_frms);
5447 tmp_stats[i++] =
5448 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5449 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 5450 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
bd1034f0
AR
5451 tmp_stats[i++] =
5452 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5453 le32_to_cpu(stat_info->tmac_ttl_octets);
5454 tmp_stats[i++] =
5455 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5456 le32_to_cpu(stat_info->tmac_ucst_frms);
5457 tmp_stats[i++] =
5458 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5459 le32_to_cpu(stat_info->tmac_nucst_frms);
541ae68f
K
5460 tmp_stats[i++] =
5461 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5462 le32_to_cpu(stat_info->tmac_any_err_frms);
bd1034f0 5463 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
1da177e4 5464 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f
K
5465 tmp_stats[i++] =
5466 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5467 le32_to_cpu(stat_info->tmac_vld_ip);
5468 tmp_stats[i++] =
5469 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5470 le32_to_cpu(stat_info->tmac_drop_ip);
5471 tmp_stats[i++] =
5472 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5473 le32_to_cpu(stat_info->tmac_icmp);
5474 tmp_stats[i++] =
5475 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5476 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 5477 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f
K
5478 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5479 le32_to_cpu(stat_info->tmac_udp);
5480 tmp_stats[i++] =
5481 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5482 le32_to_cpu(stat_info->rmac_vld_frms);
5483 tmp_stats[i++] =
5484 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5485 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
5486 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5487 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f
K
5488 tmp_stats[i++] =
5489 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5490 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5491 tmp_stats[i++] =
5492 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5493 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4 5494 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
bd1034f0 5495 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
1da177e4
LT
5496 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5497 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
bd1034f0
AR
5498 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5499 tmp_stats[i++] =
5500 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5501 le32_to_cpu(stat_info->rmac_ttl_octets);
5502 tmp_stats[i++] =
5503 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5504 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5505 tmp_stats[i++] =
5506 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5507 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
541ae68f
K
5508 tmp_stats[i++] =
5509 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5510 le32_to_cpu(stat_info->rmac_discarded_frms);
bd1034f0
AR
5511 tmp_stats[i++] =
5512 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5513 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5514 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5515 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
541ae68f
K
5516 tmp_stats[i++] =
5517 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5518 le32_to_cpu(stat_info->rmac_usized_frms);
5519 tmp_stats[i++] =
5520 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5521 le32_to_cpu(stat_info->rmac_osized_frms);
5522 tmp_stats[i++] =
5523 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5524 le32_to_cpu(stat_info->rmac_frag_frms);
5525 tmp_stats[i++] =
5526 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5527 le32_to_cpu(stat_info->rmac_jabber_frms);
bd1034f0
AR
5528 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5529 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5530 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5531 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5532 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5533 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5534 tmp_stats[i++] =
5535 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
541ae68f 5536 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
5537 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5538 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
bd1034f0
AR
5539 tmp_stats[i++] =
5540 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
541ae68f 5541 le32_to_cpu(stat_info->rmac_drop_ip);
bd1034f0
AR
5542 tmp_stats[i++] =
5543 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
541ae68f 5544 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 5545 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
bd1034f0
AR
5546 tmp_stats[i++] =
5547 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
541ae68f
K
5548 le32_to_cpu(stat_info->rmac_udp);
5549 tmp_stats[i++] =
5550 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5551 le32_to_cpu(stat_info->rmac_err_drp_udp);
bd1034f0
AR
5552 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5553 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5554 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5555 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5556 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5557 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5558 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5559 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5560 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5561 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5562 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5563 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5564 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5565 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5566 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5567 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5568 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
541ae68f
K
5569 tmp_stats[i++] =
5570 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5571 le32_to_cpu(stat_info->rmac_pause_cnt);
bd1034f0
AR
5572 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5573 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
541ae68f
K
5574 tmp_stats[i++] =
5575 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5576 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 5577 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
bd1034f0
AR
5578 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5579 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5580 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5581 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5582 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5583 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5584 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5585 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5586 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5587 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5588 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5589 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5590 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5591 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5592 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5593 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5594 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5595 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5596 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5597 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5598 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5599 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5600 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5601 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5602 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5603 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5604 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5605 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5606 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5607 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5608 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5609 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5610 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5611 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
7ba013ac
K
5612 tmp_stats[i++] = 0;
5613 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5614 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
bd1034f0
AR
5615 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5616 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5617 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5618 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5619 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5620 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5621 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5622 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5623 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5624 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5625 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5626 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5627 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5628 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5629 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5630 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5631 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
7d3d0439
RA
5632 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5633 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5634 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5635 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
fe931395 5636 if (stat_info->sw_stat.num_aggregations) {
bd1034f0
AR
5637 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5638 int count = 0;
6aa20a22 5639 /*
bd1034f0
AR
5640 * Since 64-bit divide does not work on all platforms,
5641 * do repeated subtraction.
5642 */
5643 while (tmp >= stat_info->sw_stat.num_aggregations) {
5644 tmp -= stat_info->sw_stat.num_aggregations;
5645 count++;
5646 }
5647 tmp_stats[i++] = count;
fe931395 5648 }
bd1034f0
AR
5649 else
5650 tmp_stats[i++] = 0;
1da177e4
LT
5651}
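/*
 * The repeated-subtraction loop above sidesteps a 64-bit divide.  The
 * kernel's helper for exactly this case is do_div() from asm/div64.h,
 * which divides a u64 by a u32 in place on all architectures -- an
 * equivalent sketch:
 */
#include <asm/div64.h>

static u64 avg_pkts_aggregated(u64 sum_avg_pkts, u32 num_aggregations)
{
	do_div(sum_avg_pkts, num_aggregations);	/* quotient left in place */
	return sum_avg_pkts;
}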
5652
ac1f60db 5653static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
5654{
5655 return (XENA_REG_SPACE);
5656}
5657
5658
ac1f60db 5659static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4 5660{
1ee6dd77 5661 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5662
5663 return (sp->rx_csum);
5664}
ac1f60db
AB
5665
5666static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 5667{
1ee6dd77 5668 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5669
5670 if (data)
5671 sp->rx_csum = 1;
5672 else
5673 sp->rx_csum = 0;
5674
5675 return 0;
5676}
ac1f60db
AB
5677
5678static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
5679{
5680 return (XENA_EEPROM_SPACE);
5681}
5682
ac1f60db 5683static int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
5684{
5685 return (S2IO_TEST_LEN);
5686}
ac1f60db
AB
5687
5688static void s2io_ethtool_get_strings(struct net_device *dev,
5689 u32 stringset, u8 * data)
1da177e4
LT
5690{
5691 switch (stringset) {
5692 case ETH_SS_TEST:
5693 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5694 break;
5695 case ETH_SS_STATS:
5696 memcpy(data, &ethtool_stats_keys,
5697 sizeof(ethtool_stats_keys));
5698 }
5699}
1da177e4
LT
5700static int s2io_ethtool_get_stats_count(struct net_device *dev)
5701{
5702 return (S2IO_STAT_LEN);
5703}
5704
ac1f60db 5705static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
5706{
5707 if (data)
5708 dev->features |= NETIF_F_IP_CSUM;
5709 else
5710 dev->features &= ~NETIF_F_IP_CSUM;
5711
5712 return 0;
5713}
5714
75c30b13
AR
5715static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5716{
5717 return (dev->features & NETIF_F_TSO) != 0;
5718}
5719static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5720{
5721 if (data)
5722 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5723 else
5724 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5725
5726 return 0;
5727}
1da177e4 5728
7282d491 5729static const struct ethtool_ops netdev_ethtool_ops = {
1da177e4
LT
5730 .get_settings = s2io_ethtool_gset,
5731 .set_settings = s2io_ethtool_sset,
5732 .get_drvinfo = s2io_ethtool_gdrvinfo,
5733 .get_regs_len = s2io_ethtool_get_regs_len,
5734 .get_regs = s2io_ethtool_gregs,
5735 .get_link = ethtool_op_get_link,
5736 .get_eeprom_len = s2io_get_eeprom_len,
5737 .get_eeprom = s2io_ethtool_geeprom,
5738 .set_eeprom = s2io_ethtool_seeprom,
5739 .get_pauseparam = s2io_ethtool_getpause_data,
5740 .set_pauseparam = s2io_ethtool_setpause_data,
5741 .get_rx_csum = s2io_ethtool_get_rx_csum,
5742 .set_rx_csum = s2io_ethtool_set_rx_csum,
5743 .get_tx_csum = ethtool_op_get_tx_csum,
5744 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5745 .get_sg = ethtool_op_get_sg,
5746 .set_sg = ethtool_op_set_sg,
75c30b13
AR
5747 .get_tso = s2io_ethtool_op_get_tso,
5748 .set_tso = s2io_ethtool_op_set_tso,
fed5eccd
AR
5749 .get_ufo = ethtool_op_get_ufo,
5750 .set_ufo = ethtool_op_set_ufo,
1da177e4
LT
5751 .self_test_count = s2io_ethtool_self_test_count,
5752 .self_test = s2io_ethtool_test,
5753 .get_strings = s2io_ethtool_get_strings,
5754 .phys_id = s2io_ethtool_idnic,
5755 .get_stats_count = s2io_ethtool_get_stats_count,
5756 .get_ethtool_stats = s2io_get_ethtool_stats
5757};
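/*
 * This ops table is attached to the net_device at probe time (outside this
 * hunk), conventionally via the 2.6-era helper, which simply assigns
 * dev->ethtool_ops:
 *
 *	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 */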
5758
5759/**
20346722 5760 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
5761 * @dev : Device pointer.
 5762 * @ifr : An IOCTL specific structure, that can contain a pointer to
5763 * a proprietary structure used to pass information to the driver.
5764 * @cmd : This is used to distinguish between the different commands that
5765 * can be passed to the IOCTL functions.
5766 * Description:
20346722
K
 5767 * Currently there is no special functionality supported in IOCTL, hence
 5768 * the function always returns -EOPNOTSUPP.
1da177e4
LT
5769 */
5770
ac1f60db 5771static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
5772{
5773 return -EOPNOTSUPP;
5774}
5775
5776/**
5777 * s2io_change_mtu - entry point to change MTU size for the device.
5778 * @dev : device pointer.
5779 * @new_mtu : the new MTU size for the device.
5780 * Description: A driver entry point to change MTU size for the device.
5781 * Before changing the MTU the device must be stopped.
5782 * Return value:
5783 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5784 * file on failure.
5785 */
5786
ac1f60db 5787static int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 5788{
1ee6dd77 5789 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5790
5791 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5792 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5793 dev->name);
5794 return -EPERM;
5795 }
5796
1da177e4 5797 dev->mtu = new_mtu;
d8892c6e 5798 if (netif_running(dev)) {
e6a8fee2 5799 s2io_card_down(sp);
d8892c6e
K
5800 netif_stop_queue(dev);
5801 if (s2io_card_up(sp)) {
5802 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5803 __FUNCTION__);
5804 }
5805 if (netif_queue_stopped(dev))
5806 netif_wake_queue(dev);
5807 } else { /* Device is down */
1ee6dd77 5808 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d8892c6e
K
5809 u64 val64 = new_mtu;
5810
5811 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5812 }
1da177e4
LT
5813
5814 return 0;
5815}
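/*
 * vBIT() is the driver's big-endian field builder from s2io.h; assuming its
 * usual definition there, vBIT(val, loc, sz) left-justifies an sz-bit value
 * loc bits below the MSB of the 64-bit register, so the write above lands
 * the new MTU in the 14-bit payload-length field of rmac_max_pyld_len:
 */
#define vBIT_SKETCH(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))
/* vBIT_SKETCH(new_mtu, 2, 14) == (u64)new_mtu << 48 */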
5816
5817/**
5818 * s2io_tasklet - Bottom half of the ISR.
5819 * @dev_adr : address of the device structure in dma_addr_t format.
5820 * Description:
 5821 * This is the tasklet, i.e. the bottom half of the ISR. It is
20346722 5822 * an extension of the ISR which is scheduled from interrupt context
1da177e4 5823 * and runs later in softirq context. All low priority tasks of the ISR can
20346722 5824 * be pushed into the tasklet. For now the tasklet is used only to
1da177e4
LT
5825 * replenish the Rx buffers in the Rx buffer descriptors.
5826 * Return value:
5827 * void.
5828 */
5829
5830static void s2io_tasklet(unsigned long dev_addr)
5831{
5832 struct net_device *dev = (struct net_device *) dev_addr;
1ee6dd77 5833 struct s2io_nic *sp = dev->priv;
1da177e4 5834 int i, ret;
1ee6dd77 5835 struct mac_info *mac_control;
1da177e4
LT
5836 struct config_param *config;
5837
5838 mac_control = &sp->mac_control;
5839 config = &sp->config;
5840
5841 if (!TASKLET_IN_USE) {
5842 for (i = 0; i < config->rx_ring_num; i++) {
5843 ret = fill_rx_buffers(sp, i);
5844 if (ret == -ENOMEM) {
5845 DBG_PRINT(ERR_DBG, "%s: Out of ",
5846 dev->name);
5847 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5848 break;
5849 } else if (ret == -EFILL) {
5850 DBG_PRINT(ERR_DBG,
5851 "%s: Rx Ring %d is full\n",
5852 dev->name, i);
5853 break;
5854 }
5855 }
5856 clear_bit(0, (&sp->tasklet_status));
5857 }
5858}
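/*
 * The tasklet is armed in s2io_card_up() below via tasklet_init(); the
 * interrupt path is then expected to defer Rx refill with
 * tasklet_schedule() (the scheduling site is outside this hunk).  Minimal
 * usage pattern:
 */
static void example_defer_rx_refill(struct s2io_nic *sp)
{
	/* safe from hard-irq context: runs s2io_tasklet() in softirq */
	tasklet_schedule(&sp->task);
}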
5859
5860/**
 5861 * s2io_set_link - Set the Link status
 5862 * @work: work_struct embedded in the s2io_nic private structure
5863 * Description: Sets the link status for the adapter
5864 */
5865
c4028958 5866static void s2io_set_link(struct work_struct *work)
1da177e4 5867{
1ee6dd77 5868 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
1da177e4 5869 struct net_device *dev = nic->dev;
1ee6dd77 5870 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
5871 register u64 val64;
5872 u16 subid;
5873
22747d6b
FR
5874 rtnl_lock();
5875
5876 if (!netif_running(dev))
5877 goto out_unlock;
5878
1da177e4
LT
5879 if (test_and_set_bit(0, &(nic->link_state))) {
5880 /* The card is being reset, no point doing anything */
22747d6b 5881 goto out_unlock;
1da177e4
LT
5882 }
5883
5884 subid = nic->pdev->subsystem_device;
a371a07d
K
5885 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5886 /*
5887 * Allow a small delay for the NICs self initiated
5888 * cleanup to complete.
5889 */
5890 msleep(100);
5891 }
1da177e4
LT
5892
5893 val64 = readq(&bar0->adapter_status);
19a60522
SS
5894 if (LINK_IS_UP(val64)) {
5895 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5896 if (verify_xena_quiescence(nic)) {
5897 val64 = readq(&bar0->adapter_control);
5898 val64 |= ADAPTER_CNTL_EN;
1da177e4 5899 writeq(val64, &bar0->adapter_control);
19a60522
SS
5900 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5901 nic->device_type, subid)) {
5902 val64 = readq(&bar0->gpio_control);
5903 val64 |= GPIO_CTRL_GPIO_0;
5904 writeq(val64, &bar0->gpio_control);
5905 val64 = readq(&bar0->gpio_control);
5906 } else {
5907 val64 |= ADAPTER_LED_ON;
5908 writeq(val64, &bar0->adapter_control);
a371a07d 5909 }
1da177e4 5910 nic->device_enabled_once = TRUE;
19a60522
SS
5911 } else {
5912 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5913 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5914 netif_stop_queue(dev);
1da177e4 5915 }
19a60522
SS
5916 }
5917 val64 = readq(&bar0->adapter_status);
5918 if (!LINK_IS_UP(val64)) {
5919 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5920 DBG_PRINT(ERR_DBG, " Link down after enabling ");
5921 DBG_PRINT(ERR_DBG, "device \n");
5922 } else
1da177e4 5923 s2io_link(nic, LINK_UP);
19a60522
SS
5924 } else {
5925 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5926 subid)) {
5927 val64 = readq(&bar0->gpio_control);
5928 val64 &= ~GPIO_CTRL_GPIO_0;
5929 writeq(val64, &bar0->gpio_control);
5930 val64 = readq(&bar0->gpio_control);
1da177e4 5931 }
19a60522 5932 s2io_link(nic, LINK_DOWN);
1da177e4
LT
5933 }
5934 clear_bit(0, &(nic->link_state));
22747d6b
FR
5935
5936out_unlock:
 5937 rtnl_unlock();	/* release the lock taken at entry */
1da177e4
LT
5938}
5939
1ee6dd77
RB
5940static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
5941 struct buffAdd *ba,
5942 struct sk_buff **skb, u64 *temp0, u64 *temp1,
5943 u64 *temp2, int size)
5d3213cc
AR
5944{
5945 struct net_device *dev = sp->dev;
5946 struct sk_buff *frag_list;
5947
5948 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
5949 /* allocate skb */
5950 if (*skb) {
5951 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
5952 /*
5953 * As Rx frame are not going to be processed,
5954 * using same mapped address for the Rxd
5955 * buffer pointer
5956 */
1ee6dd77 5957 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
5d3213cc
AR
5958 } else {
5959 *skb = dev_alloc_skb(size);
5960 if (!(*skb)) {
5961 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
5962 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
5963 return -ENOMEM ;
5964 }
5965 /* storing the mapped addr in a temp variable
 5966 * so that it can be used for the next rxd whose
5967 * Host Control is NULL
5968 */
1ee6dd77 5969 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
5d3213cc
AR
5970 pci_map_single( sp->pdev, (*skb)->data,
5971 size - NET_IP_ALIGN,
5972 PCI_DMA_FROMDEVICE);
5973 rxdp->Host_Control = (unsigned long) (*skb);
5974 }
5975 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
5976 /* Two buffer Mode */
5977 if (*skb) {
1ee6dd77
RB
5978 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
5979 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
5980 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
5d3213cc
AR
5981 } else {
5982 *skb = dev_alloc_skb(size);
2ceaac75
DR
5983 if (!(*skb)) {
5984 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
19a60522 5985 dev->name);
2ceaac75
DR
5986 return -ENOMEM;
5987 }
1ee6dd77 5988 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
5d3213cc
AR
5989 pci_map_single(sp->pdev, (*skb)->data,
5990 dev->mtu + 4,
5991 PCI_DMA_FROMDEVICE);
1ee6dd77 5992 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
5d3213cc
AR
5993 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
5994 PCI_DMA_FROMDEVICE);
5995 rxdp->Host_Control = (unsigned long) (*skb);
5996
5997 /* Buffer-1 will be dummy buffer not used */
1ee6dd77 5998 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
5d3213cc
AR
5999 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6000 PCI_DMA_FROMDEVICE);
6001 }
6002 } else if ((rxdp->Host_Control == 0)) {
6003 /* Three buffer mode */
6004 if (*skb) {
1ee6dd77
RB
6005 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6006 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6007 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
5d3213cc
AR
6008 } else {
6009 *skb = dev_alloc_skb(size);
2ceaac75
DR
6010 if (!(*skb)) {
6011 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6012 dev->name);
6013 return -ENOMEM;
6014 }
1ee6dd77 6015 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
5d3213cc
AR
6016 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6017 PCI_DMA_FROMDEVICE);
6018 /* Buffer-1 receives L3/L4 headers */
1ee6dd77 6019 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
5d3213cc
AR
6020 pci_map_single( sp->pdev, (*skb)->data,
6021 l3l4hdr_size + 4,
6022 PCI_DMA_FROMDEVICE);
6023 /*
6024 * skb_shinfo(skb)->frag_list will have L4
6025 * data payload
6026 */
6027 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6028 ALIGN_SIZE);
6029 if (skb_shinfo(*skb)->frag_list == NULL) {
6030 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
6031 failed\n ", dev->name);
6032 return -ENOMEM ;
6033 }
6034 frag_list = skb_shinfo(*skb)->frag_list;
6035 frag_list->next = NULL;
6036 /*
6037 * Buffer-2 receives L4 data payload
6038 */
1ee6dd77 6039 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
5d3213cc
AR
6040 pci_map_single( sp->pdev, frag_list->data,
6041 dev->mtu, PCI_DMA_FROMDEVICE);
6042 }
6043 }
6044 return 0;
6045}
1ee6dd77
RB
6046static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6047 int size)
5d3213cc
AR
6048{
6049 struct net_device *dev = sp->dev;
6050 if (sp->rxd_mode == RXD_MODE_1) {
6051 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6052 } else if (sp->rxd_mode == RXD_MODE_3B) {
6053 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6054 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6055 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6056 } else {
6057 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6058 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6059 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6060 }
6061}
6062
1ee6dd77 6063static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
6064{
6065 int i, j, k, blk_cnt = 0, size;
1ee6dd77 6066 struct mac_info * mac_control = &sp->mac_control;
5d3213cc
AR
6067 struct config_param *config = &sp->config;
6068 struct net_device *dev = sp->dev;
1ee6dd77 6069 struct RxD_t *rxdp = NULL;
5d3213cc 6070 struct sk_buff *skb = NULL;
1ee6dd77 6071 struct buffAdd *ba = NULL;
5d3213cc
AR
6072 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6073
6074 /* Calculate the size based on ring mode */
6075 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6076 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6077 if (sp->rxd_mode == RXD_MODE_1)
6078 size += NET_IP_ALIGN;
6079 else if (sp->rxd_mode == RXD_MODE_3B)
6080 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6081 else
6082 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6083
6084 for (i = 0; i < config->rx_ring_num; i++) {
6085 blk_cnt = config->rx_cfg[i].num_rxd /
6086 (rxd_count[sp->rxd_mode] +1);
6087
6088 for (j = 0; j < blk_cnt; j++) {
6089 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6090 rxdp = mac_control->rings[i].
6091 rx_blocks[j].rxds[k].virt_addr;
6092 if(sp->rxd_mode >= RXD_MODE_3A)
6093 ba = &mac_control->rings[i].ba[j][k];
6094 set_rxd_buffer_pointer(sp, rxdp, ba,
6095 &skb,(u64 *)&temp0_64,
6096 (u64 *)&temp1_64,
6097 (u64 *)&temp2_64, size);
6098
6099 set_rxd_buffer_size(sp, rxdp, size);
6100 wmb();
6101 /* flip the Ownership bit to Hardware */
6102 rxdp->Control_1 |= RXD_OWN_XENA;
6103 }
6104 }
6105 }
6106 return 0;
6107
6108}
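/*
 * The wmb() above is the classic descriptor-ownership barrier: buffer
 * pointers and sizes must be visible to the device before it can observe
 * the ownership bit flip.  The same pattern in miniature (sketch):
 */
static void hand_rxd_to_hw(struct RxD_t *rxdp)
{
	/* ... buffer pointers and Control_2 sizes filled in first ... */
	wmb();					/* order descriptor writes */
	rxdp->Control_1 |= RXD_OWN_XENA;	/* now hand it to the NIC */
}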
6109
1ee6dd77 6110static int s2io_add_isr(struct s2io_nic * sp)
1da177e4 6111{
e6a8fee2 6112 int ret = 0;
c92ca04b 6113 struct net_device *dev = sp->dev;
e6a8fee2 6114 int err = 0;
1da177e4 6115
e6a8fee2
AR
6116 if (sp->intr_type == MSI)
6117 ret = s2io_enable_msi(sp);
6118 else if (sp->intr_type == MSI_X)
6119 ret = s2io_enable_msi_x(sp);
6120 if (ret) {
6121 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6122 sp->intr_type = INTA;
20346722 6123 }
1da177e4 6124
1ee6dd77 6125 /* Store the values of the MSIX table in the struct s2io_nic structure */
e6a8fee2 6126 store_xmsi_data(sp);
c92ca04b 6127
e6a8fee2
AR
6128 /* After proper initialization of H/W, register ISR */
6129 if (sp->intr_type == MSI) {
6130 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6131 IRQF_SHARED, sp->name, dev);
6132 if (err) {
6133 pci_disable_msi(sp->pdev);
6134 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6135 dev->name);
6136 return -1;
6137 }
6138 }
6139 if (sp->intr_type == MSI_X) {
fb6a825b 6140 int i, msix_tx_cnt=0,msix_rx_cnt=0;
c92ca04b 6141
e6a8fee2
AR
6142 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6143 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6144 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6145 dev->name, i);
6146 err = request_irq(sp->entries[i].vector,
6147 s2io_msix_fifo_handle, 0, sp->desc[i],
6148 sp->s2io_entries[i].arg);
fb6a825b
SS
6149 /* If either data or addr is zero print it */
6150 if(!(sp->msix_info[i].addr &&
6151 sp->msix_info[i].data)) {
6152 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6153 "Data:0x%lx\n",sp->desc[i],
6154 (unsigned long long)
6155 sp->msix_info[i].addr,
6156 (unsigned long)
6157 ntohl(sp->msix_info[i].data));
6158 } else {
6159 msix_tx_cnt++;
6160 }
e6a8fee2
AR
6161 } else {
6162 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6163 dev->name, i);
6164 err = request_irq(sp->entries[i].vector,
6165 s2io_msix_ring_handle, 0, sp->desc[i],
6166 sp->s2io_entries[i].arg);
fb6a825b
SS
6167 /* If either data or addr is zero print it */
6168 if(!(sp->msix_info[i].addr &&
6169 sp->msix_info[i].data)) {
6170 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6171 "Data:0x%lx\n",sp->desc[i],
6172 (unsigned long long)
6173 sp->msix_info[i].addr,
6174 (unsigned long)
6175 ntohl(sp->msix_info[i].data));
6176 } else {
6177 msix_rx_cnt++;
6178 }
c92ca04b 6179 }
e6a8fee2
AR
6180 if (err) {
6181 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6182 "failed\n", dev->name, i);
6183 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6184 return -1;
6185 }
6186 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6187 }
fb6a825b
SS
6188 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6189 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
e6a8fee2
AR
6190 }
6191 if (sp->intr_type == INTA) {
6192 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6193 sp->name, dev);
6194 if (err) {
6195 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6196 dev->name);
6197 return -1;
6198 }
6199 }
6200 return 0;
6201}
1ee6dd77 6202static void s2io_rem_isr(struct s2io_nic * sp)
e6a8fee2
AR
6203{
6204 int cnt = 0;
6205 struct net_device *dev = sp->dev;
6206
6207 if (sp->intr_type == MSI_X) {
6208 int i;
6209 u16 msi_control;
6210
6211 for (i=1; (sp->s2io_entries[i].in_use ==
6212 MSIX_REGISTERED_SUCCESS); i++) {
6213 int vector = sp->entries[i].vector;
6214 void *arg = sp->s2io_entries[i].arg;
6215
6216 free_irq(vector, arg);
6217 }
6218 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6219 msi_control &= 0xFFFE; /* Disable MSI */
6220 pci_write_config_word(sp->pdev, 0x42, msi_control);
6221
6222 pci_disable_msix(sp->pdev);
6223 } else {
6224 free_irq(sp->pdev->irq, dev);
6225 if (sp->intr_type == MSI) {
6226 u16 val;
6227
6228 pci_disable_msi(sp->pdev);
6229 pci_read_config_word(sp->pdev, 0x4c, &val);
6230 val ^= 0x1;
6231 pci_write_config_word(sp->pdev, 0x4c, val);
c92ca04b
AR
6232 }
6233 }
6234 /* Waiting till all Interrupt handlers are complete */
6235 cnt = 0;
6236 do {
6237 msleep(10);
6238 if (!atomic_read(&sp->isr_cnt))
6239 break;
6240 cnt++;
6241 } while(cnt < 5);
e6a8fee2
AR
6242}
6243
1ee6dd77 6244static void s2io_card_down(struct s2io_nic * sp)
e6a8fee2
AR
6245{
6246 int cnt = 0;
1ee6dd77 6247 struct XENA_dev_config __iomem *bar0 = sp->bar0;
e6a8fee2
AR
6248 unsigned long flags;
6249 register u64 val64 = 0;
6250
6251 del_timer_sync(&sp->alarm_timer);
6252 /* If s2io_set_link task is executing, wait till it completes. */
6253 while (test_and_set_bit(0, &(sp->link_state))) {
6254 msleep(50);
6255 }
6256 atomic_set(&sp->card_state, CARD_DOWN);
6257
6258 /* disable Tx and Rx traffic on the NIC */
6259 stop_nic(sp);
6260
6261 s2io_rem_isr(sp);
1da177e4
LT
6262
6263 /* Kill tasklet. */
6264 tasklet_kill(&sp->task);
6265
6266 /* Check if the device is Quiescent and then Reset the NIC */
6267 do {
5d3213cc
AR
6268 /* As per the HW requirement we need to replenish the
6269 * receive buffer to avoid the ring bump. Since there is
 6270 * no intention of processing the Rx frame at this point, we are
 6271 * just setting the ownership bit of the rxd in each Rx
 6272 * ring to HW and setting the appropriate buffer size
 6273 * based on the ring mode.
6274 */
6275 rxd_owner_bit_reset(sp);
6276
1da177e4 6277 val64 = readq(&bar0->adapter_status);
19a60522
SS
6278 if (verify_xena_quiescence(sp)) {
6279 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
1da177e4
LT
6280 break;
6281 }
6282
6283 msleep(50);
6284 cnt++;
6285 if (cnt == 10) {
6286 DBG_PRINT(ERR_DBG,
6287 "s2io_close:Device not Quiescent ");
6288 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6289 (unsigned long long) val64);
6290 break;
6291 }
6292 } while (1);
1da177e4
LT
6293 s2io_reset(sp);
6294
7ba013ac
K
6295 spin_lock_irqsave(&sp->tx_lock, flags);
6296 /* Free all Tx buffers */
1da177e4 6297 free_tx_buffers(sp);
7ba013ac
K
6298 spin_unlock_irqrestore(&sp->tx_lock, flags);
6299
6300 /* Free all Rx buffers */
6301 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 6302 free_rx_buffers(sp);
7ba013ac 6303 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 6304
1da177e4
LT
6305 clear_bit(0, &(sp->link_state));
6306}
6307
1ee6dd77 6308static int s2io_card_up(struct s2io_nic * sp)
1da177e4 6309{
cc6e7c44 6310 int i, ret = 0;
1ee6dd77 6311 struct mac_info *mac_control;
1da177e4
LT
6312 struct config_param *config;
6313 struct net_device *dev = (struct net_device *) sp->dev;
e6a8fee2 6314 u16 interruptible;
1da177e4
LT
6315
6316 /* Initialize the H/W I/O registers */
6317 if (init_nic(sp) != 0) {
6318 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6319 dev->name);
e6a8fee2 6320 s2io_reset(sp);
1da177e4
LT
6321 return -ENODEV;
6322 }
6323
20346722
K
6324 /*
6325 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
6326 * Rx ring and initializing buffers into 30 Rx blocks
6327 */
6328 mac_control = &sp->mac_control;
6329 config = &sp->config;
6330
6331 for (i = 0; i < config->rx_ring_num; i++) {
6332 if ((ret = fill_rx_buffers(sp, i))) {
6333 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6334 dev->name);
6335 s2io_reset(sp);
6336 free_rx_buffers(sp);
6337 return -ENOMEM;
6338 }
6339 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6340 atomic_read(&sp->rx_bufs_left[i]));
6341 }
19a60522
SS
6342 /* Maintain the state prior to the open */
6343 if (sp->promisc_flg)
6344 sp->promisc_flg = 0;
6345 if (sp->m_cast_flg) {
6346 sp->m_cast_flg = 0;
6347 sp->all_multi_pos= 0;
6348 }
1da177e4
LT
6349
6350 /* Setting its receive mode */
6351 s2io_set_multicast(dev);
6352
7d3d0439 6353 if (sp->lro) {
b41477f3 6354 /* Initialize max aggregatable pkts per session based on MTU */
7d3d0439
RA
6355 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6356 /* Check if we can use(if specified) user provided value */
6357 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6358 sp->lro_max_aggr_per_sess = lro_max_pkts;
6359 }
6360
1da177e4
LT
6361 /* Enable Rx Traffic and interrupts on the NIC */
6362 if (start_nic(sp)) {
6363 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 6364 s2io_reset(sp);
e6a8fee2
AR
6365 free_rx_buffers(sp);
6366 return -ENODEV;
6367 }
6368
6369 /* Add interrupt service routine */
6370 if (s2io_add_isr(sp) != 0) {
6371 if (sp->intr_type == MSI_X)
6372 s2io_rem_isr(sp);
6373 s2io_reset(sp);
1da177e4
LT
6374 free_rx_buffers(sp);
6375 return -ENODEV;
6376 }
6377
25fff88e
K
6378 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6379
e6a8fee2
AR
6380 /* Enable tasklet for the device */
6381 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6382
6383 /* Enable select interrupts */
6384 if (sp->intr_type != INTA)
6385 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6386 else {
6387 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6388 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6389 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6390 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6391 }
6392
6393
1da177e4
LT
6394 atomic_set(&sp->card_state, CARD_UP);
6395 return 0;
6396}
6397
20346722 6398/**
1da177e4
LT
6399 * s2io_restart_nic - Resets the NIC.
6400 * @data : long pointer to the device private structure
6401 * Description:
6402 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 6403 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
6404 * the run time of the watch dog routine which is run holding a
6405 * spin lock.
6406 */
6407
c4028958 6408static void s2io_restart_nic(struct work_struct *work)
1da177e4 6409{
1ee6dd77 6410 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 6411 struct net_device *dev = sp->dev;
1da177e4 6412
22747d6b
FR
6413 rtnl_lock();
6414
6415 if (!netif_running(dev))
6416 goto out_unlock;
6417
e6a8fee2 6418 s2io_card_down(sp);
1da177e4
LT
6419 if (s2io_card_up(sp)) {
6420 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6421 dev->name);
6422 }
6423 netif_wake_queue(dev);
6424 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6425 dev->name);
22747d6b
FR
6426out_unlock:
6427 rtnl_unlock();
1da177e4
LT
6428}
6429
20346722
K
6430/**
6431 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
6432 * @dev : Pointer to net device structure
6433 * Description:
6434 * This function is triggered if the Tx Queue is stopped
6435 * for a pre-defined amount of time when the Interface is still up.
6436 * If the Interface is jammed in such a situation, the hardware is
6437 * reset (by s2io_close) and restarted again (by s2io_open) to
6438 * overcome any problem that might have been caused in the hardware.
6439 * Return value:
6440 * void
6441 */
6442
6443static void s2io_tx_watchdog(struct net_device *dev)
6444{
1ee6dd77 6445 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6446
6447 if (netif_carrier_ok(dev)) {
6448 schedule_work(&sp->rst_timer_task);
bd1034f0 6449 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
1da177e4
LT
6450 }
6451}
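/*
 * In the 2.6 net core this handler fires from the stack's dev_watchdog
 * timer.  The registration happens at probe time (outside this hunk); the
 * conventional wiring -- the timeout constant is an assumption here -- is:
 *
 *	dev->tx_timeout = s2io_tx_watchdog;
 *	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
 */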
6452
6453/**
6454 * rx_osm_handler - To perform some OS related operations on SKB.
6455 * @sp: private member of the device structure,pointer to s2io_nic structure.
6456 * @skb : the socket buffer pointer.
6457 * @len : length of the packet
6458 * @cksum : FCS checksum of the frame.
6459 * @ring_no : the ring from which this RxD was extracted.
20346722 6460 * Description:
b41477f3 6461 * This function is called by the Rx interrupt service routine to perform
1da177e4
LT
6462 * some OS related operations on the SKB before passing it to the upper
6463 * layers. It mainly checks if the checksum is OK, if so adds it to the
6464 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6465 * to the upper layer. If the checksum is wrong, it increments the Rx
6466 * packet error count, frees the SKB and returns error.
6467 * Return value:
6468 * SUCCESS on success and -1 on failure.
6469 */
1ee6dd77 6470static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
1da177e4 6471{
1ee6dd77 6472 struct s2io_nic *sp = ring_data->nic;
1da177e4 6473 struct net_device *dev = (struct net_device *) sp->dev;
20346722
K
6474 struct sk_buff *skb = (struct sk_buff *)
6475 ((unsigned long) rxdp->Host_Control);
6476 int ring_no = ring_data->ring_no;
1da177e4 6477 u16 l3_csum, l4_csum;
863c11a9 6478 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
1ee6dd77 6479 struct lro *lro;
da6971d8 6480
20346722 6481 skb->dev = dev;
c92ca04b 6482
863c11a9 6483 if (err) {
bd1034f0
AR
6484 /* Check for parity error */
6485 if (err & 0x1) {
6486 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6487 }
6488
863c11a9
AR
6489 /*
6490 * Drop the packet if bad transfer code. Exception being
6491 * 0x5, which could be due to unsupported IPv6 extension header.
 6492 * In this case, we let the stack handle the packet.
6493 * Note that in this case, since checksum will be incorrect,
6494 * stack will validate the same.
6495 */
6496 if (err && ((err >> 48) != 0x5)) {
6497 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6498 dev->name, err);
6499 sp->stats.rx_crc_errors++;
6500 dev_kfree_skb(skb);
6501 atomic_dec(&sp->rx_bufs_left[ring_no]);
6502 rxdp->Host_Control = 0;
6503 return 0;
6504 }
20346722 6505 }
1da177e4 6506
20346722
K
6507 /* Updating statistics */
6508 rxdp->Host_Control = 0;
6509 sp->rx_pkt_count++;
6510 sp->stats.rx_packets++;
da6971d8
AR
6511 if (sp->rxd_mode == RXD_MODE_1) {
6512 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
20346722 6513
da6971d8
AR
6514 sp->stats.rx_bytes += len;
6515 skb_put(skb, len);
6516
6517 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6518 int get_block = ring_data->rx_curr_get_info.block_index;
6519 int get_off = ring_data->rx_curr_get_info.offset;
6520 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6521 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6522 unsigned char *buff = skb_push(skb, buf0_len);
6523
1ee6dd77 6524 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
da6971d8
AR
6525 sp->stats.rx_bytes += buf0_len + buf2_len;
6526 memcpy(buff, ba->ba_0, buf0_len);
6527
6528 if (sp->rxd_mode == RXD_MODE_3A) {
6529 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6530
6531 skb_put(skb, buf1_len);
6532 skb->len += buf2_len;
6533 skb->data_len += buf2_len;
da6971d8
AR
6534 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6535 sp->stats.rx_bytes += buf1_len;
6536
6537 } else
6538 skb_put(skb, buf2_len);
6539 }

	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
	    (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the checksum of the received
			 * frame is OK or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (sp->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				ret = s2io_club_tcp_session(skb->data, &tcp,
						&tcp_len, &lro, rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent);
					clear_lro_session(lro);
					sp->mac_control.stats_info->
					    sw_stat.flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					sp->mac_control.stats_info->
					    sw_stat.sending_both++;
					queue_rx_frame(lro->parent);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /* First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __FUNCTION__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum; let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
			else
				vlan_hwaccel_rx(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
		} else {
			if (napi)
				netif_receive_skb(skb);
			else
				netif_rx(skb);
		}
	} else {
send_up:
		queue_rx_frame(skb);
	}
	dev->last_rx = jiffies;
aggregate:
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}

/**
 *  s2io_link - stops/starts the Tx queue.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @link : indicates whether the link is UP/DOWN.
 *  Description:
 *  This function updates the carrier state (and thereby stops/starts the
 *  Tx queue) depending on whether the link status of the NIC is down or
 *  up. This is called by the Alarm interrupt handler whenever a link
 *  change interrupt comes up.
 *  Return value:
 *  void.
 */

static void s2io_link(struct s2io_nic *sp, int link)
{
	struct net_device *dev = (struct net_device *) sp->dev;

	if (link != sp->last_link_state) {
		if (link == LINK_DOWN) {
			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		} else {
			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
			netif_carrier_on(dev);
		}
	}
	sp->last_link_state = link;
}

/**
 *  get_xena_rev_id - to identify revision ID of xena.
 *  @pdev : PCI Dev structure
 *  Description:
 *  Function to identify the Revision ID of xena.
 *  Return value:
 *  returns the revision ID of the device.
 */

static int get_xena_rev_id(struct pci_dev *pdev)
{
	u8 id = 0;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
	return id;
}

/**
 *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  Description:
 *  This function initializes a few of the PCI and PCI-X configuration
 *  registers with recommended values.
 *  Return value:
 *  void
 */

static void s2io_init_pci(struct s2io_nic *sp)
{
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (pcix_cmd | 1));
	/* Read back the value that actually took effect. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}

static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
{
	if (tx_fifo_num > 8) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
		tx_fifo_num = 8;
	}
	if (rx_ring_num > 8) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
		rx_ring_num = 8;
	}
	if (*dev_intr_type != INTA)
		napi = 0;

#ifndef CONFIG_PCI_MSI
	if (*dev_intr_type != INTA) {
		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
			  "MSI/MSI-X. Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#else
	if (*dev_intr_type > MSI_X) {
		DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#endif
	if ((*dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	if (rx_ring_mode > 3) {
		DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
		rx_ring_mode = 3;
	}
	return SUCCESS;
}
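
/*
 * Usage note (illustrative, not in the original source): with the checks
 * above, a load such as
 *
 *     modprobe s2io intr_type=2 rx_ring_num=4 rx_ring_mode=2 lro=1
 *
 * requests MSI-X interrupts, four Rx rings in 2-buffer mode and LRO, while
 * out-of-range requests (e.g. rx_ring_num=16 or rx_ring_mode=5) are clamped
 * by s2io_verify_parm() with a message on the console.
 */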

/**
 *  rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
 *  or Traffic class respectively.
 *  @nic: device private variable
 *  @ds_codepoint: DS codepoint (0-63) whose steering entry is programmed.
 *  @ring: receive ring the codepoint should be steered to.
 *  Description: The function configures the receive steering to
 *  desired receive ring.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure (out-of-range DS codepoint, or the command did not
 *  complete).
 */
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	if (ds_codepoint > 63)
		return FAILURE;

	val64 = RTS_DS_MEM_DATA(ring);
	writeq(val64, &bar0->rts_ds_mem_data);

	val64 = RTS_DS_MEM_CTRL_WE |
		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);

	writeq(val64, &bar0->rts_ds_mem_ctrl);

	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
				RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
				S2IO_BIT_RESET);
}
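
/*
 * Illustrative sketch (not part of the original driver): a hypothetical
 * helper showing how a caller could use rts_ds_steer() above to spread the
 * 64 DS codepoints round-robin over the configured Rx rings. The helper
 * name and the round-robin policy are assumptions for illustration only;
 * it assumes config.rx_ring_num has already been set to a non-zero value.
 */
static int __attribute__((unused)) s2io_steer_all_codepoints(struct s2io_nic *nic)
{
	u8 cp;

	for (cp = 0; cp < 64; cp++) {
		/* Program codepoint 'cp' onto ring (cp mod rx_ring_num) */
		if (rts_ds_steer(nic, cp,
				 cp % nic->config.rx_ring_num) != SUCCESS)
			return FAILURE;
	}
	return SUCCESS;
}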

/**
 *  s2io_init_nic - Initialization of the adapter.
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;

	if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for "
				  "consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	if (dev_intr_type != MSI_X) {
		if (pci_request_regions(pdev, s2io_driver_name)) {
			DBG_PRINT(ERR_DBG, "Request Regions failed\n");
			pci_disable_device(pdev);
			return -ENODEV;
		}
	} else {
		if (!(request_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0), s2io_driver_name))) {
			DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
			pci_disable_device(pdev);
			return -ENODEV;
		}
		if (!(request_mem_region(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2), s2io_driver_name))) {
			DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
			release_mem_region(pci_resource_start(pdev, 0),
					   pci_resource_len(pdev, 0));
			pci_disable_device(pdev);
			return -ENODEV;
		}
	}

	dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;
	if (rx_ring_mode == 3)
		sp->rxd_mode = RXD_MODE_3A;

	sp->intr_type = dev_intr_type;

	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Tx side parameters. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* Initialize the number of ISRs currently running */
	atomic_set(&sp->isr_cnt, 0);

	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
			   pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;
	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

	/*
	 * will use eth_mac_addr() for dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
	dev->poll = s2io_poll;
	dev->weight = 32;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);

	/*
	 * Each 32-bit half of the value read back from the adapter holds
	 * its address bytes in reverse order; unpack accordingly.
	 */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;
	sp->link_state = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);

	if (!napi)
		spin_lock_init(&sp->put_lock);
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
		  sp->product_name, get_xena_rev_id(sp->pdev));
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
		  "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
		  sp->def_mac_addr[0].mac_addr[0],
		  sp->def_mac_addr[0].mac_addr[1],
		  sp->def_mac_addr[0].mac_addr[2],
		  sp->def_mac_addr[0].mac_addr[3],
		  sp->def_mac_addr[0].mac_addr[4],
		  sp->def_mac_addr[0].mac_addr[5]);
	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch (sp->rxd_mode) {
	case RXD_MODE_1:
		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3B:
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3A:
		DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
			  dev->name);
		break;
	}

	if (napi)
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
	switch (sp->intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
			  " enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	/* Initialize bimodal Interrupts */
	sp->config.bimodal = bimodal;
	if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
		sp->config.bimodal = 0;
		DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
			  dev->name);
	}

	/*
	 * Make the link state off at this point; when the link change
	 * interrupt comes, the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

register_failed:
set_swap_failed:
	iounmap(sp->bar1);
bar1_remap_failed:
	iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	if (dev_intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}

/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	flush_scheduled_work();

	sp = dev->priv;
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	if (sp->intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}

/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
	return pci_register_driver(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);

static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
				struct tcphdr **tcp, struct RxD_t *rxdp)
{
	int ip_off;
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG, "%s: Non-TCP frames not supported for LRO\n",
			  __FUNCTION__);
		return -1;
	}

	/* TODO:
	 * By default the VLAN field in the MAC is stripped by the card, if this
	 * feature is turned off in rx_pa_cfg register, then the ip_off field
	 * has to be shifted by a further 2 bytes
	 */
	switch (l2_type) {
	case 0: /* DIX type */
	case 4: /* DIX type with VLAN */
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		break;
	/* LLC, SNAP etc are considered non-mergeable */
	default:
		return -1;
	}

	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}
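
/*
 * Worked example (illustrative; assumes HEADER_ETHERNET_II_802_3_SIZE is the
 * usual 14-byte DIX header): for a plain DIX frame carrying an IPv4 header
 * with ihl == 5, check_L2_lro_capable() yields ip = buffer + 14 and
 * tcp = (u8 *)ip + 20, i.e. the TCP header starts 34 bytes into the frame.
 */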

static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
				  struct tcphdr *tcp)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
	    (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
		return -1;
	return 0;
}

static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
{
	return (ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
}
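
/*
 * Worked example (added for clarity): for a frame with ip->tot_len of 1500,
 * no IP options (ihl == 5, i.e. 20 bytes) and a TCP header with timestamps
 * (doff == 8, i.e. 32 bytes), get_l4_pyld_length() returns
 * 1500 - 20 - 32 = 1448 bytes of TCP payload.
 */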

static void initiate_new_session(struct lro *lro, u8 *l2h,
		struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = ntohl(tcp->ack_seq);
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	/*
	 * Check if we saw a TCP timestamp. Other consistency checks have
	 * already been done. With doff == 8 the 12 option bytes have been
	 * verified to carry a single timestamp option (in practice the
	 * NOP, NOP, TIMESTAMP layout), so tsval and tsecr are read from
	 * the second and third 32-bit words after the base header.
	 */
	if (tcp->doff == 8) {
		u32 *ptr;
		ptr = (u32 *)(tcp+1);
		lro->saw_ts = 1;
		lro->cur_tsval = *(ptr+1);
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}

static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		u32 *ptr = (u32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/*
	 * Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}

static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
			     struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window advertisement (from this pkt) */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		u32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (u32 *) (tcp + 1);
		lro->cur_tsval = *(ptr + 1);
		lro->cur_tsecr = *(ptr + 2);
	}
}

static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently we recognize only the ack control word; any
		 * other control field being set results in flushing the
		 * LRO session.
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((u32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
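
/*
 * Summary of the s2io_club_tcp_session() return codes, as consumed by the
 * switch in rx_osm_handler() above (added for clarity; not in the original
 * source):
 *   -1  non-TCP frame, or not L2 aggregatable
 *    0  all LRO sessions already in use
 *    1  packet aggregated into an existing session
 *    2  out-of-order or non-mergeable packet: flush both it and the session
 *    3  a new session was begun with this packet as parent
 *    4  session reached lro_max_aggr_per_sess: flush the session
 *    5  first packet of a would-be session is not L3/L4 aggregatable
 */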

static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				    sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph, *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/*
		 * Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create a new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n",
			  __FUNCTION__);
		break;
	}

	return ret;
}

static void clear_lro_session(struct lro *lro)
{
	memset(lro, 0, sizeof(struct lro));
}

static void queue_rx_frame(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	skb->protocol = eth_type_trans(skb, dev);
	if (napi)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}

static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	skb_pull(skb, (skb->len - tcp_len));
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
}
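
/*
 * Resulting aggregate layout (added for clarity): the parent skb keeps its
 * own headers and linear data; every appended segment is first trimmed to
 * its TCP payload with skb_pull() and then chained via
 * skb_shinfo(parent)->frag_list, with lro->last_frag tracking the tail:
 *
 *   parent (len = headers + sum of payloads, data_len = lro->frags_len)
 *     frag_list -> seg1 -> seg2 -> ... -> lro->last_frag
 */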