]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/s2io.c
drivers/net/s2io.c: Remove unnecessary casts of pci_get_drvdata
[net-next-2.6.git] / drivers / net / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
926bd900 3 * Copyright(c) 2002-2010 Exar Corp.
d44570e4 4 *
1da177e4
LT
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4 27 * The module loadable parameters that are supported by the driver and a brief
a2a20aef 28 * explanation of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
9dc737a7
AR
41 * lro_max_pkts: This parameter defines maximum number of packets can be
42 * aggregated as a single large packet
926930b2
SS
43 * napi: This parameter used to enable/disable NAPI (polling Rx)
44 * Possible values '1' for enable and '0' for disable. Default is '1'
45 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
46 * Possible values '1' for enable and '0' for disable. Default is '0'
47 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
48 * Possible values '1' for enable , '0' for disable.
49 * Default is '2' - which means disable in promisc mode
50 * and enable in non-promiscuous mode.
3a3d5756
SH
51 * multiq: This parameter used to enable/disable MULTIQUEUE support.
52 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
53 ************************************************************************/
54
6cef2b8e
JP
55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56
1da177e4
LT
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
1e7f0bd8 62#include <linux/dma-mapping.h>
1da177e4
LT
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
40239396 66#include <linux/mdio.h>
1da177e4
LT
67#include <linux/skbuff.h>
68#include <linux/init.h>
69#include <linux/delay.h>
70#include <linux/stddef.h>
71#include <linux/ioctl.h>
72#include <linux/timex.h>
1da177e4 73#include <linux/ethtool.h>
1da177e4 74#include <linux/workqueue.h>
be3a6b02 75#include <linux/if_vlan.h>
7d3d0439
RA
76#include <linux/ip.h>
77#include <linux/tcp.h>
d44570e4
JP
78#include <linux/uaccess.h>
79#include <linux/io.h>
5a0e3ad6 80#include <linux/slab.h>
7d3d0439 81#include <net/tcp.h>
1da177e4 82
1da177e4 83#include <asm/system.h>
fe931395 84#include <asm/div64.h>
330ce0de 85#include <asm/irq.h>
1da177e4
LT
86
87/* local include */
88#include "s2io.h"
89#include "s2io-regs.h"
90
#define DRV_VERSION "2.0.26.27"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-Rx-descriptor-mode tables, indexed by nic->rxd_mode:
 * rxd_size  - bytes occupied by one RxD in that mode;
 * rxd_count - number of RxDs that fit in one Rx block in that mode. */
static int rxd_size[2] = {32, 48};
static int rxd_count[2] = {127, 85};
da6971d8 99
1ee6dd77 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
101{
102 int ret;
103
104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
d44570e4 105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
5e25b9dd
K
106
107 return ret;
108}
109
20346722 110/*
1da177e4
LT
111 * Cards with following subsystem_id have a link state indication
112 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113 * macro below identifies these cards given the subsystem_id.
114 */
/*
 * Evaluates to 1 for Xframe I cards whose subsystem id falls in the
 * faulty-link-indicator ranges (600B-600D, 640B-640D), 0 otherwise.
 * All macro arguments and the full expansion are parenthesized so the
 * macro stays correct when given compound expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up when neither the remote- nor local-fault RMAC bits are set. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 122
/* Return non-zero when the adapter has been brought up
 * (the __S2IO_STATE_CARD_UP bit is set in sp->state). */
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
127
/* Ethtool related variables and Macros. */
/* Names of the adapter self-tests reported through ethtool; the
 * offline/online tag in each name indicates when the test may run. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
136
/* Hardware statistic names exported via "ethtool -S" for all
 * Xframe adapters (tmac_* = transmit MAC, rmac_* = receive MAC,
 * remaining entries are PCI/descriptor counters). */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
233
/* Additional statistic names only reported for Xframe II adapters
 * (appended after ethtool_xena_stats_keys in the ethtool output). */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
252
/* Software (driver-maintained) statistic names reported via ethtool;
 * the first entry is a section banner shown in the "ethtool -S" list. */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
327
/* Counts of the statistic-name tables above; XFRAME_II adds the
 * enhanced statistics on top of the XFRAME_I set. */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Total bytes of name strings handed to ethtool for each device type. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
1da177e4 340
/*
 * Initialize @timer with handler @handle and data @arg, then arm it to
 * fire @exp jiffies from now.  Wrapped in do { } while (0) so the macro
 * expands to a single statement and stays correct inside an unbraced
 * if/else body (the original four bare statements did not).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
25fff88e 346
2fd37688
SS
347/* copy mac addr to def_mac_addr array */
348static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
349{
350 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
351 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
352 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
353 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
354 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
355 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356}
04025095 357
/* Add the vlan */
/* Record the new vlan_group on the NIC.  Every Tx FIFO lock is held
 * while the pointer is swapped so no transmit path can observe a
 * half-updated vlgrp. */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Quiesce all transmit paths: take every FIFO's tx_lock. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
	}

	nic->vlgrp = grp;

	/* Release the locks in the reverse of the acquisition order. */
	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
	}
}
382
/* Unregister the vlan */
/* Remove vlan id @vid from the NIC's vlan_group.  As in
 * s2io_vlan_rx_register(), all Tx FIFO locks are held across the
 * update so transmit paths never see an inconsistent group. */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	int i;
	struct s2io_nic *nic = netdev_priv(dev);
	unsigned long flags[MAX_TX_FIFOS];
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Quiesce all transmit paths: take every FIFO's tx_lock. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
	}

	if (nic->vlgrp)
		vlan_group_set_device(nic->vlgrp, vid, NULL);

	/* Release the locks in the reverse of the acquisition order. */
	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
	}
}
407
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define END_SIGN 0x0
/* DTX programming sequence for Xframe II (Herc): alternating
 * set-address / write-data value pairs, terminated by END_SIGN. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
434
/* DTX programming sequence for Xframe I (Xena): alternating
 * set-address / write-data value pairs, terminated by END_SIGN. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
450
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
/* Register write sequence applied as a workaround; terminated by
 * END_SIGN like the DTX tables above. */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
472
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx lengths and per-ring Rx sizes/frame lengths; these
 * defaults can be overridden on the module command line via the
 * module_param_array() declarations below. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 518
/*
 * S2io device table.
 * This table lists all the devices that this driver supports.
 */
static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
536
/* PCI error-recovery callbacks (AER): detection, slot reset, resume. */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
542
/* PCI driver registration: probe/remove entry points and error handler. */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
	.err_handler = &s2io_err_handler,
};
550
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of pages needed to hold @len items at @per_each items per page
 * (i.e. ceiling division).  Arguments are parenthesized so the macro is
 * safe for compound expressions. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
553
3a3d5756
SH
554/* netqueue manipulation helper functions */
555static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
556{
fd2ea0a7
DM
557 if (!sp->config.multiq) {
558 int i;
559
3a3d5756
SH
560 for (i = 0; i < sp->config.tx_fifo_num; i++)
561 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
3a3d5756 562 }
fd2ea0a7 563 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
564}
565
566static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
567{
fd2ea0a7 568 if (!sp->config.multiq)
3a3d5756
SH
569 sp->mac_control.fifos[fifo_no].queue_state =
570 FIFO_QUEUE_STOP;
fd2ea0a7
DM
571
572 netif_tx_stop_all_queues(sp->dev);
3a3d5756
SH
573}
574
575static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
576{
fd2ea0a7
DM
577 if (!sp->config.multiq) {
578 int i;
579
3a3d5756
SH
580 for (i = 0; i < sp->config.tx_fifo_num; i++)
581 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 582 }
fd2ea0a7 583 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
584}
585
586static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
587{
fd2ea0a7 588 if (!sp->config.multiq)
3a3d5756
SH
589 sp->mac_control.fifos[fifo_no].queue_state =
590 FIFO_QUEUE_START;
fd2ea0a7
DM
591
592 netif_tx_start_all_queues(sp->dev);
3a3d5756
SH
593}
594
595static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
596{
fd2ea0a7
DM
597 if (!sp->config.multiq) {
598 int i;
599
3a3d5756
SH
600 for (i = 0; i < sp->config.tx_fifo_num; i++)
601 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
3a3d5756 602 }
fd2ea0a7 603 netif_tx_wake_all_queues(sp->dev);
3a3d5756
SH
604}
605
606static inline void s2io_wake_tx_queue(
607 struct fifo_info *fifo, int cnt, u8 multiq)
608{
609
3a3d5756
SH
610 if (multiq) {
611 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
612 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 613 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
614 if (netif_queue_stopped(fifo->dev)) {
615 fifo->queue_state = FIFO_QUEUE_START;
616 netif_wake_queue(fifo->dev);
617 }
618 }
619}
620
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS, or -EINVAL / -ENOMEM / FAILURE on error.  On any
 * failure the caller is expected to invoke free_shared_mem() to
 * release whatever was allocated up to that point (see the inline
 * comments below).
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running byte total for sw_stat */

	/* Allocation and initialization of TXDLs in FIFOs */
	/* First pass: reject configurations asking for more TxDs in
	 * total than the hardware supports. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	/* Second pass: validate each individual FIFO length. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* One TxD list (TXDL) holds max_txds descriptors; compute how
	 * many whole lists fit in a page. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Allocate the per-FIFO bookkeeping (list_info) arrays. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	/* Allocate DMA-coherent pages for the TXDLs of each FIFO and
	 * record each list's virtual/physical address in list_info. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into TXDLs; stop at fifo_len. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* One u64 per TxD, used by the UFO (UDP fragmentation offload)
	 * in-band signalling path. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	/* Validate ring sizes: each ring must hold a whole number of
	 * Rx blocks (rxd_count[mode] RxDs + 1 link descriptor each). */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			/* Per-RxD virtual/DMA address lookup table. */
			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Each block's trailing link descriptor points at the
		 * next block, wrapping at the end to form a ring. */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate by ALIGN_SIZE and keep
					 * the original pointer in ba_*_org so
					 * ba_0/ba_1 can be rounded up to an
					 * aligned address. */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *)tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
927
20346722
K
928/**
929 * free_shared_mem - Free the allocated Memory
1da177e4
LT
930 * @nic: Device private variable.
931 * Description: This function is to free all memory locations allocated by
932 * the init_shared_mem() function and return it to the kernel.
933 */
934
935static void free_shared_mem(struct s2io_nic *nic)
936{
937 int i, j, blk_cnt, size;
938 void *tmp_v_addr;
939 dma_addr_t tmp_p_addr;
1da177e4 940 int lst_size, lst_per_page;
8910b49f 941 struct net_device *dev;
491976b2 942 int page_num = 0;
ffb5df6c
JP
943 struct config_param *config;
944 struct mac_info *mac_control;
945 struct stat_block *stats;
946 struct swStat *swstats;
1da177e4
LT
947
948 if (!nic)
949 return;
950
8910b49f
MG
951 dev = nic->dev;
952
1da177e4 953 config = &nic->config;
ffb5df6c
JP
954 mac_control = &nic->mac_control;
955 stats = mac_control->stats_info;
956 swstats = &stats->sw_stat;
1da177e4 957
d44570e4 958 lst_size = sizeof(struct TxD) * config->max_txds;
1da177e4
LT
959 lst_per_page = PAGE_SIZE / lst_size;
960
961 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
962 struct fifo_info *fifo = &mac_control->fifos[i];
963 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
964
965 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
1da177e4
LT
966 for (j = 0; j < page_num; j++) {
967 int mem_blks = (j * lst_per_page);
13d866a9
JP
968 struct list_info_hold *fli;
969
970 if (!fifo->list_info)
6aa20a22 971 return;
13d866a9
JP
972
973 fli = &fifo->list_info[mem_blks];
974 if (!fli->list_virt_addr)
1da177e4
LT
975 break;
976 pci_free_consistent(nic->pdev, PAGE_SIZE,
13d866a9
JP
977 fli->list_virt_addr,
978 fli->list_phy_addr);
ffb5df6c 979 swstats->mem_freed += PAGE_SIZE;
1da177e4 980 }
776bd20f 981 /* If we got a zero DMA address during allocation,
982 * free the page now
983 */
984 if (mac_control->zerodma_virt_addr) {
985 pci_free_consistent(nic->pdev, PAGE_SIZE,
986 mac_control->zerodma_virt_addr,
987 (dma_addr_t)0);
6aa20a22 988 DBG_PRINT(INIT_DBG,
9e39f7c5
JP
989 "%s: Freeing TxDL with zero DMA address. "
990 "Virtual address %p\n",
991 dev->name, mac_control->zerodma_virt_addr);
ffb5df6c 992 swstats->mem_freed += PAGE_SIZE;
776bd20f 993 }
13d866a9 994 kfree(fifo->list_info);
82c2d023 995 swstats->mem_freed += tx_cfg->fifo_len *
d44570e4 996 sizeof(struct list_info_hold);
1da177e4
LT
997 }
998
1da177e4 999 size = SIZE_OF_BLOCK;
1da177e4 1000 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
1001 struct ring_info *ring = &mac_control->rings[i];
1002
1003 blk_cnt = ring->block_count;
1da177e4 1004 for (j = 0; j < blk_cnt; j++) {
13d866a9
JP
1005 tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
1006 tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
1da177e4
LT
1007 if (tmp_v_addr == NULL)
1008 break;
1009 pci_free_consistent(nic->pdev, size,
1010 tmp_v_addr, tmp_p_addr);
ffb5df6c 1011 swstats->mem_freed += size;
13d866a9 1012 kfree(ring->rx_blocks[j].rxds);
ffb5df6c
JP
1013 swstats->mem_freed += sizeof(struct rxd_info) *
1014 rxd_count[nic->rxd_mode];
1da177e4
LT
1015 }
1016 }
1017
6d517a27 1018 if (nic->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
1019 /* Freeing buffer storage addresses in 2BUFF mode. */
1020 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
1021 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1022 struct ring_info *ring = &mac_control->rings[i];
1023
1024 blk_cnt = rx_cfg->num_rxd /
1025 (rxd_count[nic->rxd_mode] + 1);
da6971d8
AR
1026 for (j = 0; j < blk_cnt; j++) {
1027 int k = 0;
13d866a9 1028 if (!ring->ba[j])
da6971d8
AR
1029 continue;
1030 while (k != rxd_count[nic->rxd_mode]) {
13d866a9 1031 struct buffAdd *ba = &ring->ba[j][k];
da6971d8 1032 kfree(ba->ba_0_org);
ffb5df6c
JP
1033 swstats->mem_freed +=
1034 BUF0_LEN + ALIGN_SIZE;
da6971d8 1035 kfree(ba->ba_1_org);
ffb5df6c
JP
1036 swstats->mem_freed +=
1037 BUF1_LEN + ALIGN_SIZE;
da6971d8
AR
1038 k++;
1039 }
13d866a9 1040 kfree(ring->ba[j]);
ffb5df6c
JP
1041 swstats->mem_freed += sizeof(struct buffAdd) *
1042 (rxd_count[nic->rxd_mode] + 1);
1da177e4 1043 }
13d866a9 1044 kfree(ring->ba);
ffb5df6c
JP
1045 swstats->mem_freed += sizeof(struct buffAdd *) *
1046 blk_cnt;
1da177e4 1047 }
1da177e4 1048 }
1da177e4 1049
2fda096d 1050 for (i = 0; i < nic->config.tx_fifo_num; i++) {
13d866a9
JP
1051 struct fifo_info *fifo = &mac_control->fifos[i];
1052 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1053
1054 if (fifo->ufo_in_band_v) {
ffb5df6c
JP
1055 swstats->mem_freed += tx_cfg->fifo_len *
1056 sizeof(u64);
13d866a9 1057 kfree(fifo->ufo_in_band_v);
2fda096d
SR
1058 }
1059 }
1060
1da177e4 1061 if (mac_control->stats_mem) {
ffb5df6c 1062 swstats->mem_freed += mac_control->stats_mem_sz;
1da177e4
LT
1063 pci_free_consistent(nic->pdev,
1064 mac_control->stats_mem_sz,
1065 mac_control->stats_mem,
1066 mac_control->stats_mem_phy);
491976b2 1067 }
1da177e4
LT
1068}
1069
541ae68f
K
1070/**
1071 * s2io_verify_pci_mode -
1072 */
1073
1ee6dd77 1074static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1075{
1ee6dd77 1076 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1077 register u64 val64 = 0;
1078 int mode;
1079
1080 val64 = readq(&bar0->pci_mode);
1081 mode = (u8)GET_PCI_MODE(val64);
1082
d44570e4 1083 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1084 return -1; /* Unknown PCI mode */
1085 return mode;
1086}
1087
c92ca04b
AR
1088#define NEC_VENID 0x1033
1089#define NEC_DEVID 0x0125
1090static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1091{
1092 struct pci_dev *tdev = NULL;
26d36b64
AC
1093 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1094 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1095 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1096 pci_dev_put(tdev);
c92ca04b 1097 return 1;
7ad62dbc 1098 }
c92ca04b
AR
1099 }
1100 }
1101 return 0;
1102}
541ae68f 1103
/* Bus clock in MHz, indexed by the mode value from GET_PCI_MODE() */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f
K
1105/**
1106 * s2io_print_pci_mode -
1107 */
1ee6dd77 1108static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1109{
1ee6dd77 1110 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1111 register u64 val64 = 0;
1112 int mode;
1113 struct config_param *config = &nic->config;
9e39f7c5 1114 const char *pcimode;
541ae68f
K
1115
1116 val64 = readq(&bar0->pci_mode);
1117 mode = (u8)GET_PCI_MODE(val64);
1118
d44570e4 1119 if (val64 & PCI_MODE_UNKNOWN_MODE)
541ae68f
K
1120 return -1; /* Unknown PCI mode */
1121
c92ca04b
AR
1122 config->bus_speed = bus_speed[mode];
1123
1124 if (s2io_on_nec_bridge(nic->pdev)) {
1125 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
d44570e4 1126 nic->dev->name);
c92ca04b
AR
1127 return mode;
1128 }
1129
d44570e4
JP
1130 switch (mode) {
1131 case PCI_MODE_PCI_33:
9e39f7c5 1132 pcimode = "33MHz PCI bus";
d44570e4
JP
1133 break;
1134 case PCI_MODE_PCI_66:
9e39f7c5 1135 pcimode = "66MHz PCI bus";
d44570e4
JP
1136 break;
1137 case PCI_MODE_PCIX_M1_66:
9e39f7c5 1138 pcimode = "66MHz PCIX(M1) bus";
d44570e4
JP
1139 break;
1140 case PCI_MODE_PCIX_M1_100:
9e39f7c5 1141 pcimode = "100MHz PCIX(M1) bus";
d44570e4
JP
1142 break;
1143 case PCI_MODE_PCIX_M1_133:
9e39f7c5 1144 pcimode = "133MHz PCIX(M1) bus";
d44570e4
JP
1145 break;
1146 case PCI_MODE_PCIX_M2_66:
9e39f7c5 1147 pcimode = "133MHz PCIX(M2) bus";
d44570e4
JP
1148 break;
1149 case PCI_MODE_PCIX_M2_100:
9e39f7c5 1150 pcimode = "200MHz PCIX(M2) bus";
d44570e4
JP
1151 break;
1152 case PCI_MODE_PCIX_M2_133:
9e39f7c5 1153 pcimode = "266MHz PCIX(M2) bus";
d44570e4
JP
1154 break;
1155 default:
9e39f7c5
JP
1156 pcimode = "unsupported bus!";
1157 mode = -1;
541ae68f
K
1158 }
1159
9e39f7c5
JP
1160 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1161 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1162
541ae68f
K
1163 return mode;
1164}
1165
b7c5678f
RV
/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * by programming, for each Tx fifo, the TTI data1/data2 memories and
 * then issuing a command-memory strobe that must complete before the
 * next fifo is programmed.
 * Return Value: SUCCESS on success and
 * '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Scale the timer with the measured bus speed */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on fifo 0 and only at link-up */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/*
			 * With default Tx steering and multiple fifos, the
			 * UDP fifo range [udp_fifo_idx, udp_fifo_idx +
			 * total_udp_fifos) gets more aggressive utilization
			 * thresholds than the other fifos.
			 */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit data1/data2 for fifo i via the command memory */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		/* The strobe bit self-clears when the write has landed */
		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1242
20346722
K
1243/**
1244 * init_nic - Initialization of hardware
b7c5678f 1245 * @nic: device private variable
20346722
K
1246 * Description: The function sequentially configures every block
1247 * of the H/W from their reset values.
1248 * Return Value: SUCCESS on success and
1da177e4
LT
1249 * '-1' on failure (endian settings incorrect).
1250 */
1251
1252static int init_nic(struct s2io_nic *nic)
1253{
1ee6dd77 1254 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1255 struct net_device *dev = nic->dev;
1256 register u64 val64 = 0;
1257 void __iomem *add;
1258 u32 time;
1259 int i, j;
c92ca04b 1260 int dtx_cnt = 0;
1da177e4 1261 unsigned long long mem_share;
20346722 1262 int mem_size;
ffb5df6c
JP
1263 struct config_param *config = &nic->config;
1264 struct mac_info *mac_control = &nic->mac_control;
1da177e4 1265
5e25b9dd 1266 /* to set the swapper controle on the card */
d44570e4
JP
1267 if (s2io_set_swapper(nic)) {
1268 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
9f74ffde 1269 return -EIO;
1da177e4
LT
1270 }
1271
541ae68f
K
1272 /*
1273 * Herc requires EOI to be removed from reset before XGXS, so..
1274 */
1275 if (nic->device_type & XFRAME_II_DEVICE) {
1276 val64 = 0xA500000000ULL;
1277 writeq(val64, &bar0->sw_reset);
1278 msleep(500);
1279 val64 = readq(&bar0->sw_reset);
1280 }
1281
1da177e4
LT
1282 /* Remove XGXS from reset state */
1283 val64 = 0;
1284 writeq(val64, &bar0->sw_reset);
1da177e4 1285 msleep(500);
20346722 1286 val64 = readq(&bar0->sw_reset);
1da177e4 1287
7962024e
SH
1288 /* Ensure that it's safe to access registers by checking
1289 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1290 */
1291 if (nic->device_type == XFRAME_II_DEVICE) {
1292 for (i = 0; i < 50; i++) {
1293 val64 = readq(&bar0->adapter_status);
1294 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1295 break;
1296 msleep(10);
1297 }
1298 if (i == 50)
1299 return -ENODEV;
1300 }
1301
1da177e4
LT
1302 /* Enable Receiving broadcasts */
1303 add = &bar0->mac_cfg;
1304 val64 = readq(&bar0->mac_cfg);
1305 val64 |= MAC_RMAC_BCAST_ENABLE;
1306 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 1307 writel((u32)val64, add);
1da177e4
LT
1308 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1309 writel((u32) (val64 >> 32), (add + 4));
1310
1311 /* Read registers in all blocks */
1312 val64 = readq(&bar0->mac_int_mask);
1313 val64 = readq(&bar0->mc_int_mask);
1314 val64 = readq(&bar0->xgxs_int_mask);
1315
1316 /* Set MTU */
1317 val64 = dev->mtu;
1318 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1319
541ae68f
K
1320 if (nic->device_type & XFRAME_II_DEVICE) {
1321 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1322 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1323 &bar0->dtx_control, UF);
541ae68f
K
1324 if (dtx_cnt & 0x1)
1325 msleep(1); /* Necessary!! */
1da177e4
LT
1326 dtx_cnt++;
1327 }
541ae68f 1328 } else {
c92ca04b
AR
1329 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1330 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1331 &bar0->dtx_control, UF);
1332 val64 = readq(&bar0->dtx_control);
1333 dtx_cnt++;
1da177e4
LT
1334 }
1335 }
1336
1337 /* Tx DMA Initialization */
1338 val64 = 0;
1339 writeq(val64, &bar0->tx_fifo_partition_0);
1340 writeq(val64, &bar0->tx_fifo_partition_1);
1341 writeq(val64, &bar0->tx_fifo_partition_2);
1342 writeq(val64, &bar0->tx_fifo_partition_3);
1343
1da177e4 1344 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
1345 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1346
1347 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1348 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1da177e4
LT
1349
1350 if (i == (config->tx_fifo_num - 1)) {
1351 if (i % 2 == 0)
1352 i++;
1353 }
1354
1355 switch (i) {
1356 case 1:
1357 writeq(val64, &bar0->tx_fifo_partition_0);
1358 val64 = 0;
b7c5678f 1359 j = 0;
1da177e4
LT
1360 break;
1361 case 3:
1362 writeq(val64, &bar0->tx_fifo_partition_1);
1363 val64 = 0;
b7c5678f 1364 j = 0;
1da177e4
LT
1365 break;
1366 case 5:
1367 writeq(val64, &bar0->tx_fifo_partition_2);
1368 val64 = 0;
b7c5678f 1369 j = 0;
1da177e4
LT
1370 break;
1371 case 7:
1372 writeq(val64, &bar0->tx_fifo_partition_3);
b7c5678f
RV
1373 val64 = 0;
1374 j = 0;
1375 break;
1376 default:
1377 j++;
1da177e4
LT
1378 break;
1379 }
1380 }
1381
5e25b9dd
K
1382 /*
1383 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1384 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1385 */
d44570e4 1386 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
5e25b9dd
K
1387 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1388
1da177e4
LT
1389 val64 = readq(&bar0->tx_fifo_partition_0);
1390 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
d44570e4 1391 &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1da177e4 1392
20346722
K
1393 /*
1394 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1395 * integrity checking.
1396 */
1397 val64 = readq(&bar0->tx_pa_cfg);
d44570e4
JP
1398 val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1399 TX_PA_CFG_IGNORE_SNAP_OUI |
1400 TX_PA_CFG_IGNORE_LLC_CTRL |
1401 TX_PA_CFG_IGNORE_L2_ERR;
1da177e4
LT
1402 writeq(val64, &bar0->tx_pa_cfg);
1403
1404 /* Rx DMA intialization. */
1405 val64 = 0;
1406 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
1407 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1408
1409 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1da177e4
LT
1410 }
1411 writeq(val64, &bar0->rx_queue_priority);
1412
20346722
K
1413 /*
1414 * Allocating equal share of memory to all the
1da177e4
LT
1415 * configured Rings.
1416 */
1417 val64 = 0;
541ae68f
K
1418 if (nic->device_type & XFRAME_II_DEVICE)
1419 mem_size = 32;
1420 else
1421 mem_size = 64;
1422
1da177e4
LT
1423 for (i = 0; i < config->rx_ring_num; i++) {
1424 switch (i) {
1425 case 0:
20346722
K
1426 mem_share = (mem_size / config->rx_ring_num +
1427 mem_size % config->rx_ring_num);
1da177e4
LT
1428 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1429 continue;
1430 case 1:
20346722 1431 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1432 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1433 continue;
1434 case 2:
20346722 1435 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1436 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1437 continue;
1438 case 3:
20346722 1439 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1440 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1441 continue;
1442 case 4:
20346722 1443 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1444 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1445 continue;
1446 case 5:
20346722 1447 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1448 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1449 continue;
1450 case 6:
20346722 1451 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1452 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1453 continue;
1454 case 7:
20346722 1455 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1456 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1457 continue;
1458 }
1459 }
1460 writeq(val64, &bar0->rx_queue_cfg);
1461
20346722 1462 /*
5e25b9dd 1463 * Filling Tx round robin registers
b7c5678f 1464 * as per the number of FIFOs for equal scheduling priority
1da177e4 1465 */
5e25b9dd
K
1466 switch (config->tx_fifo_num) {
1467 case 1:
b7c5678f 1468 val64 = 0x0;
5e25b9dd
K
1469 writeq(val64, &bar0->tx_w_round_robin_0);
1470 writeq(val64, &bar0->tx_w_round_robin_1);
1471 writeq(val64, &bar0->tx_w_round_robin_2);
1472 writeq(val64, &bar0->tx_w_round_robin_3);
1473 writeq(val64, &bar0->tx_w_round_robin_4);
1474 break;
1475 case 2:
b7c5678f 1476 val64 = 0x0001000100010001ULL;
5e25b9dd 1477 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1478 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1479 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1480 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1481 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1482 writeq(val64, &bar0->tx_w_round_robin_4);
1483 break;
1484 case 3:
b7c5678f 1485 val64 = 0x0001020001020001ULL;
5e25b9dd 1486 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1487 val64 = 0x0200010200010200ULL;
5e25b9dd 1488 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1489 val64 = 0x0102000102000102ULL;
5e25b9dd 1490 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1491 val64 = 0x0001020001020001ULL;
5e25b9dd 1492 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1493 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1494 writeq(val64, &bar0->tx_w_round_robin_4);
1495 break;
1496 case 4:
b7c5678f 1497 val64 = 0x0001020300010203ULL;
5e25b9dd 1498 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1499 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1500 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1501 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1502 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1503 writeq(val64, &bar0->tx_w_round_robin_4);
1504 break;
1505 case 5:
b7c5678f 1506 val64 = 0x0001020304000102ULL;
5e25b9dd 1507 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1508 val64 = 0x0304000102030400ULL;
5e25b9dd 1509 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1510 val64 = 0x0102030400010203ULL;
5e25b9dd 1511 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1512 val64 = 0x0400010203040001ULL;
5e25b9dd 1513 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1514 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1515 writeq(val64, &bar0->tx_w_round_robin_4);
1516 break;
1517 case 6:
b7c5678f 1518 val64 = 0x0001020304050001ULL;
5e25b9dd 1519 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1520 val64 = 0x0203040500010203ULL;
5e25b9dd 1521 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1522 val64 = 0x0405000102030405ULL;
5e25b9dd 1523 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1524 val64 = 0x0001020304050001ULL;
5e25b9dd 1525 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1526 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1527 writeq(val64, &bar0->tx_w_round_robin_4);
1528 break;
1529 case 7:
b7c5678f 1530 val64 = 0x0001020304050600ULL;
5e25b9dd 1531 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1532 val64 = 0x0102030405060001ULL;
5e25b9dd 1533 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1534 val64 = 0x0203040506000102ULL;
5e25b9dd 1535 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1536 val64 = 0x0304050600010203ULL;
5e25b9dd 1537 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1538 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1539 writeq(val64, &bar0->tx_w_round_robin_4);
1540 break;
1541 case 8:
b7c5678f 1542 val64 = 0x0001020304050607ULL;
5e25b9dd 1543 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1544 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1545 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1546 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1547 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1548 writeq(val64, &bar0->tx_w_round_robin_4);
1549 break;
1550 }
1551
b41477f3 1552 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1553 val64 = readq(&bar0->tx_fifo_partition_0);
1554 val64 |= (TX_FIFO_PARTITION_EN);
1555 writeq(val64, &bar0->tx_fifo_partition_0);
1556
5e25b9dd 1557 /* Filling the Rx round robin registers as per the
0425b46a
SH
1558 * number of Rings and steering based on QoS with
1559 * equal priority.
1560 */
5e25b9dd
K
1561 switch (config->rx_ring_num) {
1562 case 1:
0425b46a
SH
1563 val64 = 0x0;
1564 writeq(val64, &bar0->rx_w_round_robin_0);
1565 writeq(val64, &bar0->rx_w_round_robin_1);
1566 writeq(val64, &bar0->rx_w_round_robin_2);
1567 writeq(val64, &bar0->rx_w_round_robin_3);
1568 writeq(val64, &bar0->rx_w_round_robin_4);
1569
5e25b9dd
K
1570 val64 = 0x8080808080808080ULL;
1571 writeq(val64, &bar0->rts_qos_steering);
1572 break;
1573 case 2:
0425b46a 1574 val64 = 0x0001000100010001ULL;
5e25b9dd 1575 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1576 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1577 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1578 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1579 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1580 writeq(val64, &bar0->rx_w_round_robin_4);
1581
1582 val64 = 0x8080808040404040ULL;
1583 writeq(val64, &bar0->rts_qos_steering);
1584 break;
1585 case 3:
0425b46a 1586 val64 = 0x0001020001020001ULL;
5e25b9dd 1587 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1588 val64 = 0x0200010200010200ULL;
5e25b9dd 1589 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1590 val64 = 0x0102000102000102ULL;
5e25b9dd 1591 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1592 val64 = 0x0001020001020001ULL;
5e25b9dd 1593 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1594 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1595 writeq(val64, &bar0->rx_w_round_robin_4);
1596
1597 val64 = 0x8080804040402020ULL;
1598 writeq(val64, &bar0->rts_qos_steering);
1599 break;
1600 case 4:
0425b46a 1601 val64 = 0x0001020300010203ULL;
5e25b9dd 1602 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1603 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1604 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1605 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1606 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1607 writeq(val64, &bar0->rx_w_round_robin_4);
1608
1609 val64 = 0x8080404020201010ULL;
1610 writeq(val64, &bar0->rts_qos_steering);
1611 break;
1612 case 5:
0425b46a 1613 val64 = 0x0001020304000102ULL;
5e25b9dd 1614 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1615 val64 = 0x0304000102030400ULL;
5e25b9dd 1616 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1617 val64 = 0x0102030400010203ULL;
5e25b9dd 1618 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1619 val64 = 0x0400010203040001ULL;
5e25b9dd 1620 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1621 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1622 writeq(val64, &bar0->rx_w_round_robin_4);
1623
1624 val64 = 0x8080404020201008ULL;
1625 writeq(val64, &bar0->rts_qos_steering);
1626 break;
1627 case 6:
0425b46a 1628 val64 = 0x0001020304050001ULL;
5e25b9dd 1629 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1630 val64 = 0x0203040500010203ULL;
5e25b9dd 1631 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1632 val64 = 0x0405000102030405ULL;
5e25b9dd 1633 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1634 val64 = 0x0001020304050001ULL;
5e25b9dd 1635 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1636 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1637 writeq(val64, &bar0->rx_w_round_robin_4);
1638
1639 val64 = 0x8080404020100804ULL;
1640 writeq(val64, &bar0->rts_qos_steering);
1641 break;
1642 case 7:
0425b46a 1643 val64 = 0x0001020304050600ULL;
5e25b9dd 1644 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1645 val64 = 0x0102030405060001ULL;
5e25b9dd 1646 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1647 val64 = 0x0203040506000102ULL;
5e25b9dd 1648 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1649 val64 = 0x0304050600010203ULL;
5e25b9dd 1650 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1651 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1652 writeq(val64, &bar0->rx_w_round_robin_4);
1653
1654 val64 = 0x8080402010080402ULL;
1655 writeq(val64, &bar0->rts_qos_steering);
1656 break;
1657 case 8:
0425b46a 1658 val64 = 0x0001020304050607ULL;
5e25b9dd 1659 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1660 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1661 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1662 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1663 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1664 writeq(val64, &bar0->rx_w_round_robin_4);
1665
1666 val64 = 0x8040201008040201ULL;
1667 writeq(val64, &bar0->rts_qos_steering);
1668 break;
1669 }
1da177e4
LT
1670
1671 /* UDP Fix */
1672 val64 = 0;
20346722 1673 for (i = 0; i < 8; i++)
1da177e4
LT
1674 writeq(val64, &bar0->rts_frm_len_n[i]);
1675
5e25b9dd
K
1676 /* Set the default rts frame length for the rings configured */
1677 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1678 for (i = 0 ; i < config->rx_ring_num ; i++)
1679 writeq(val64, &bar0->rts_frm_len_n[i]);
1680
1681 /* Set the frame length for the configured rings
1682 * desired by the user
1683 */
1684 for (i = 0; i < config->rx_ring_num; i++) {
1685 /* If rts_frm_len[i] == 0 then it is assumed that user not
1686 * specified frame length steering.
1687 * If the user provides the frame length then program
1688 * the rts_frm_len register for those values or else
1689 * leave it as it is.
1690 */
1691 if (rts_frm_len[i] != 0) {
1692 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
d44570e4 1693 &bar0->rts_frm_len_n[i]);
5e25b9dd
K
1694 }
1695 }
8a4bdbaa 1696
9fc93a41
SS
1697 /* Disable differentiated services steering logic */
1698 for (i = 0; i < 64; i++) {
1699 if (rts_ds_steer(nic, i, 0) == FAILURE) {
9e39f7c5
JP
1700 DBG_PRINT(ERR_DBG,
1701 "%s: rts_ds_steer failed on codepoint %d\n",
1702 dev->name, i);
9f74ffde 1703 return -ENODEV;
9fc93a41
SS
1704 }
1705 }
1706
20346722 1707 /* Program statistics memory */
1da177e4 1708 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1709
541ae68f
K
1710 if (nic->device_type == XFRAME_II_DEVICE) {
1711 val64 = STAT_BC(0x320);
1712 writeq(val64, &bar0->stat_byte_cnt);
1713 }
1714
20346722 1715 /*
1da177e4
LT
1716 * Initializing the sampling rate for the device to calculate the
1717 * bandwidth utilization.
1718 */
1719 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
d44570e4 1720 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1da177e4
LT
1721 writeq(val64, &bar0->mac_link_util);
1722
20346722
K
1723 /*
1724 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1725 * Scheme.
1726 */
1da177e4 1727
b7c5678f
RV
1728 /* Initialize TTI */
1729 if (SUCCESS != init_tti(nic, nic->last_link_state))
1730 return -ENODEV;
1da177e4 1731
8a4bdbaa
SS
1732 /* RTI Initialization */
1733 if (nic->device_type == XFRAME_II_DEVICE) {
541ae68f 1734 /*
8a4bdbaa
SS
1735 * Programmed to generate Apprx 500 Intrs per
1736 * second
1737 */
1738 int count = (nic->config.bus_speed * 125)/4;
1739 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1740 } else
1741 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1742 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
d44570e4
JP
1743 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1744 RTI_DATA1_MEM_RX_URNG_C(0x30) |
1745 RTI_DATA1_MEM_RX_TIMER_AC_EN;
8a4bdbaa
SS
1746
1747 writeq(val64, &bar0->rti_data1_mem);
1748
1749 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1750 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1751 if (nic->config.intr_type == MSI_X)
d44570e4
JP
1752 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1753 RTI_DATA2_MEM_RX_UFC_D(0x40));
8a4bdbaa 1754 else
d44570e4
JP
1755 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1756 RTI_DATA2_MEM_RX_UFC_D(0x80));
8a4bdbaa 1757 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1758
8a4bdbaa 1759 for (i = 0; i < config->rx_ring_num; i++) {
d44570e4
JP
1760 val64 = RTI_CMD_MEM_WE |
1761 RTI_CMD_MEM_STROBE_NEW_CMD |
1762 RTI_CMD_MEM_OFFSET(i);
8a4bdbaa 1763 writeq(val64, &bar0->rti_command_mem);
1da177e4 1764
8a4bdbaa
SS
1765 /*
1766 * Once the operation completes, the Strobe bit of the
1767 * command register will be reset. We poll for this
1768 * particular condition. We wait for a maximum of 500ms
1769 * for the operation to complete, if it's not complete
1770 * by then we return error.
1771 */
1772 time = 0;
f957bcf0 1773 while (true) {
8a4bdbaa
SS
1774 val64 = readq(&bar0->rti_command_mem);
1775 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1776 break;
b6e3f982 1777
8a4bdbaa 1778 if (time > 10) {
9e39f7c5 1779 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
8a4bdbaa 1780 dev->name);
9f74ffde 1781 return -ENODEV;
b6e3f982 1782 }
8a4bdbaa
SS
1783 time++;
1784 msleep(50);
1da177e4 1785 }
1da177e4
LT
1786 }
1787
20346722
K
1788 /*
1789 * Initializing proper values as Pause threshold into all
1da177e4
LT
1790 * the 8 Queues on Rx side.
1791 */
1792 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1793 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1794
1795 /* Disable RMAC PAD STRIPPING */
509a2671 1796 add = &bar0->mac_cfg;
1da177e4
LT
1797 val64 = readq(&bar0->mac_cfg);
1798 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1799 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1800 writel((u32) (val64), add);
1801 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1802 writel((u32) (val64 >> 32), (add + 4));
1803 val64 = readq(&bar0->mac_cfg);
1804
7d3d0439
RA
1805 /* Enable FCS stripping by adapter */
1806 add = &bar0->mac_cfg;
1807 val64 = readq(&bar0->mac_cfg);
1808 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1809 if (nic->device_type == XFRAME_II_DEVICE)
1810 writeq(val64, &bar0->mac_cfg);
1811 else {
1812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1813 writel((u32) (val64), add);
1814 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1815 writel((u32) (val64 >> 32), (add + 4));
1816 }
1817
20346722
K
1818 /*
1819 * Set the time value to be inserted in the pause frame
1da177e4
LT
1820 * generated by xena.
1821 */
1822 val64 = readq(&bar0->rmac_pause_cfg);
1823 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1824 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1825 writeq(val64, &bar0->rmac_pause_cfg);
1826
20346722 1827 /*
1da177e4
LT
1828 * Set the Threshold Limit for Generating the pause frame
1829 * If the amount of data in any Queue exceeds ratio of
1830 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1831 * pause frame is generated
1832 */
1833 val64 = 0;
1834 for (i = 0; i < 4; i++) {
d44570e4
JP
1835 val64 |= (((u64)0xFF00 |
1836 nic->mac_control.mc_pause_threshold_q0q3)
1837 << (i * 2 * 8));
1da177e4
LT
1838 }
1839 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1840
1841 val64 = 0;
1842 for (i = 0; i < 4; i++) {
d44570e4
JP
1843 val64 |= (((u64)0xFF00 |
1844 nic->mac_control.mc_pause_threshold_q4q7)
1845 << (i * 2 * 8));
1da177e4
LT
1846 }
1847 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1848
20346722
K
1849 /*
1850 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1851 * exceeded the limit pointed by shared_splits
1852 */
1853 val64 = readq(&bar0->pic_control);
1854 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1855 writeq(val64, &bar0->pic_control);
1856
863c11a9
AR
1857 if (nic->config.bus_speed == 266) {
1858 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1859 writeq(0x0, &bar0->read_retry_delay);
1860 writeq(0x0, &bar0->write_retry_delay);
1861 }
1862
541ae68f
K
1863 /*
1864 * Programming the Herc to split every write transaction
1865 * that does not start on an ADB to reduce disconnects.
1866 */
1867 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1868 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1869 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1870 writeq(val64, &bar0->misc_control);
1871 val64 = readq(&bar0->pic_control2);
b7b5a128 1872 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
863c11a9 1873 writeq(val64, &bar0->pic_control2);
541ae68f 1874 }
c92ca04b
AR
1875 if (strstr(nic->product_name, "CX4")) {
1876 val64 = TMAC_AVG_IPG(0x17);
1877 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d
K
1878 }
1879
1da177e4
LT
1880 return SUCCESS;
1881}
a371a07d
K
1882#define LINK_UP_DOWN_INTERRUPT 1
1883#define MAC_RMAC_ERR_TIMER 2
1884
1ee6dd77 1885static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d
K
1886{
1887 if (nic->device_type == XFRAME_II_DEVICE)
1888 return LINK_UP_DOWN_INTERRUPT;
1889 else
1890 return MAC_RMAC_ERR_TIMER;
1891}
8116f3cf 1892
9caab458
SS
1893/**
1894 * do_s2io_write_bits - update alarm bits in alarm register
1895 * @value: alarm bits
1896 * @flag: interrupt status
1897 * @addr: address value
1898 * Description: update alarm bits in alarm register
1899 * Return Value:
1900 * NONE.
1901 */
1902static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1903{
1904 u64 temp64;
1905
1906 temp64 = readq(addr);
1907
d44570e4
JP
1908 if (flag == ENABLE_INTRS)
1909 temp64 &= ~((u64)value);
9caab458 1910 else
d44570e4 1911 temp64 |= ((u64)value);
9caab458
SS
1912 writeq(temp64, addr);
1913}
1da177e4 1914
43b7c451 1915static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
9caab458
SS
1916{
1917 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1918 register u64 gen_int_mask = 0;
01e16faa 1919 u64 interruptible;
9caab458 1920
01e16faa 1921 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
9caab458 1922 if (mask & TX_DMA_INTR) {
9caab458
SS
1923 gen_int_mask |= TXDMA_INT_M;
1924
1925 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
d44570e4
JP
1926 TXDMA_PCC_INT | TXDMA_TTI_INT |
1927 TXDMA_LSO_INT | TXDMA_TPA_INT |
1928 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
9caab458
SS
1929
1930 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
d44570e4
JP
1931 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1932 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1933 &bar0->pfc_err_mask);
9caab458
SS
1934
1935 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
d44570e4
JP
1936 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1937 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
9caab458
SS
1938
1939 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
d44570e4
JP
1940 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1941 PCC_N_SERR | PCC_6_COF_OV_ERR |
1942 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1943 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1944 PCC_TXB_ECC_SG_ERR,
1945 flag, &bar0->pcc_err_mask);
9caab458
SS
1946
1947 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
d44570e4 1948 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
9caab458
SS
1949
1950 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
d44570e4
JP
1951 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1952 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1953 flag, &bar0->lso_err_mask);
9caab458
SS
1954
1955 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
d44570e4 1956 flag, &bar0->tpa_err_mask);
9caab458
SS
1957
1958 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
9caab458
SS
1959 }
1960
1961 if (mask & TX_MAC_INTR) {
1962 gen_int_mask |= TXMAC_INT_M;
1963 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
d44570e4 1964 &bar0->mac_int_mask);
9caab458 1965 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
d44570e4
JP
1966 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1967 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1968 flag, &bar0->mac_tmac_err_mask);
9caab458
SS
1969 }
1970
1971 if (mask & TX_XGXS_INTR) {
1972 gen_int_mask |= TXXGXS_INT_M;
1973 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
d44570e4 1974 &bar0->xgxs_int_mask);
9caab458 1975 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
d44570e4
JP
1976 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1977 flag, &bar0->xgxs_txgxs_err_mask);
9caab458
SS
1978 }
1979
1980 if (mask & RX_DMA_INTR) {
1981 gen_int_mask |= RXDMA_INT_M;
1982 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
d44570e4
JP
1983 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1984 flag, &bar0->rxdma_int_mask);
9caab458 1985 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
d44570e4
JP
1986 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1987 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1988 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
9caab458 1989 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
d44570e4
JP
1990 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1991 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1992 &bar0->prc_pcix_err_mask);
9caab458 1993 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
d44570e4
JP
1994 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1995 &bar0->rpa_err_mask);
9caab458 1996 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
d44570e4
JP
1997 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1998 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1999 RDA_FRM_ECC_SG_ERR |
2000 RDA_MISC_ERR|RDA_PCIX_ERR,
2001 flag, &bar0->rda_err_mask);
9caab458 2002 do_s2io_write_bits(RTI_SM_ERR_ALARM |
d44570e4
JP
2003 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2004 flag, &bar0->rti_err_mask);
9caab458
SS
2005 }
2006
2007 if (mask & RX_MAC_INTR) {
2008 gen_int_mask |= RXMAC_INT_M;
2009 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
d44570e4
JP
2010 &bar0->mac_int_mask);
2011 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2012 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2013 RMAC_DOUBLE_ECC_ERR);
01e16faa
SH
2014 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2015 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2016 do_s2io_write_bits(interruptible,
d44570e4 2017 flag, &bar0->mac_rmac_err_mask);
9caab458
SS
2018 }
2019
d44570e4 2020 if (mask & RX_XGXS_INTR) {
9caab458
SS
2021 gen_int_mask |= RXXGXS_INT_M;
2022 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
d44570e4 2023 &bar0->xgxs_int_mask);
9caab458 2024 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
d44570e4 2025 &bar0->xgxs_rxgxs_err_mask);
9caab458
SS
2026 }
2027
2028 if (mask & MC_INTR) {
2029 gen_int_mask |= MC_INT_M;
d44570e4
JP
2030 do_s2io_write_bits(MC_INT_MASK_MC_INT,
2031 flag, &bar0->mc_int_mask);
9caab458 2032 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
d44570e4
JP
2033 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2034 &bar0->mc_err_mask);
9caab458
SS
2035 }
2036 nic->general_int_mask = gen_int_mask;
2037
2038 /* Remove this line when alarm interrupts are enabled */
2039 nic->general_int_mask = 0;
2040}
d44570e4 2041
20346722
K
2042/**
2043 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1da177e4
LT
2044 * @nic: device private variable,
2045 * @mask: A mask indicating which Intr block must be modified and,
2046 * @flag: A flag indicating whether to enable or disable the Intrs.
2047 * Description: This function will either disable or enable the interrupts
20346722
K
2048 * depending on the flag argument. The mask argument can be used to
2049 * enable/disable any Intr block.
1da177e4
LT
2050 * Return Value: NONE.
2051 */
2052
2053static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2054{
1ee6dd77 2055 struct XENA_dev_config __iomem *bar0 = nic->bar0;
9caab458
SS
2056 register u64 temp64 = 0, intr_mask = 0;
2057
2058 intr_mask = nic->general_int_mask;
1da177e4
LT
2059
2060 /* Top level interrupt classification */
2061 /* PIC Interrupts */
9caab458 2062 if (mask & TX_PIC_INTR) {
1da177e4 2063 /* Enable PIC Intrs in the general intr mask register */
9caab458 2064 intr_mask |= TXPIC_INT_M;
1da177e4 2065 if (flag == ENABLE_INTRS) {
20346722 2066 /*
a371a07d 2067 * If Hercules adapter enable GPIO otherwise
b41477f3 2068 * disable all PCIX, Flash, MDIO, IIC and GPIO
20346722
K
2069 * interrupts for now.
2070 * TODO
1da177e4 2071 */
a371a07d 2072 if (s2io_link_fault_indication(nic) ==
d44570e4 2073 LINK_UP_DOWN_INTERRUPT) {
9caab458 2074 do_s2io_write_bits(PIC_INT_GPIO, flag,
d44570e4 2075 &bar0->pic_int_mask);
9caab458 2076 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
d44570e4 2077 &bar0->gpio_int_mask);
9caab458 2078 } else
a371a07d 2079 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1da177e4 2080 } else if (flag == DISABLE_INTRS) {
20346722
K
2081 /*
2082 * Disable PIC Intrs in the general
2083 * intr mask register
1da177e4
LT
2084 */
2085 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1da177e4
LT
2086 }
2087 }
2088
1da177e4
LT
2089 /* Tx traffic interrupts */
2090 if (mask & TX_TRAFFIC_INTR) {
9caab458 2091 intr_mask |= TXTRAFFIC_INT_M;
1da177e4 2092 if (flag == ENABLE_INTRS) {
20346722 2093 /*
1da177e4 2094 * Enable all the Tx side interrupts
20346722 2095 * writing 0 Enables all 64 TX interrupt levels
1da177e4
LT
2096 */
2097 writeq(0x0, &bar0->tx_traffic_mask);
2098 } else if (flag == DISABLE_INTRS) {
20346722
K
2099 /*
2100 * Disable Tx Traffic Intrs in the general intr mask
1da177e4
LT
2101 * register.
2102 */
2103 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1da177e4
LT
2104 }
2105 }
2106
2107 /* Rx traffic interrupts */
2108 if (mask & RX_TRAFFIC_INTR) {
9caab458 2109 intr_mask |= RXTRAFFIC_INT_M;
1da177e4 2110 if (flag == ENABLE_INTRS) {
1da177e4
LT
2111 /* writing 0 Enables all 8 RX interrupt levels */
2112 writeq(0x0, &bar0->rx_traffic_mask);
2113 } else if (flag == DISABLE_INTRS) {
20346722
K
2114 /*
2115 * Disable Rx Traffic Intrs in the general intr mask
1da177e4
LT
2116 * register.
2117 */
2118 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1da177e4
LT
2119 }
2120 }
9caab458
SS
2121
2122 temp64 = readq(&bar0->general_int_mask);
2123 if (flag == ENABLE_INTRS)
d44570e4 2124 temp64 &= ~((u64)intr_mask);
9caab458
SS
2125 else
2126 temp64 = DISABLE_ALL_INTRS;
2127 writeq(temp64, &bar0->general_int_mask);
2128
2129 nic->general_int_mask = readq(&bar0->general_int_mask);
1da177e4
LT
2130}
2131
19a60522
SS
2132/**
2133 * verify_pcc_quiescent- Checks for PCC quiescent state
2134 * Return: 1 If PCC is quiescence
2135 * 0 If PCC is not quiescence
2136 */
1ee6dd77 2137static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2138{
19a60522 2139 int ret = 0, herc;
1ee6dd77 2140 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2141 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2142
19a60522 2143 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722 2144
f957bcf0 2145 if (flag == false) {
44c10138 2146 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2147 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2148 ret = 1;
19a60522
SS
2149 } else {
2150 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2151 ret = 1;
20346722
K
2152 }
2153 } else {
44c10138 2154 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2155 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2156 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2157 ret = 1;
5e25b9dd
K
2158 } else {
2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2160 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2161 ret = 1;
20346722
K
2162 }
2163 }
2164
2165 return ret;
2166}
2167/**
2168 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4 2169 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 2170 * on whether adapter enable bit was written or not the comparison
1da177e4
LT
2171 * differs and the calling function passes the input argument flag to
2172 * indicate this.
20346722 2173 * Return: 1 If xena is quiescence
1da177e4
LT
2174 * 0 If Xena is not quiescence
2175 */
2176
1ee6dd77 2177static int verify_xena_quiescence(struct s2io_nic *sp)
1da177e4 2178{
19a60522 2179 int mode;
1ee6dd77 2180 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522
SS
2181 u64 val64 = readq(&bar0->adapter_status);
2182 mode = s2io_verify_pci_mode(sp);
1da177e4 2183
19a60522 2184 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
9e39f7c5 2185 DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
19a60522
SS
2186 return 0;
2187 }
2188 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
9e39f7c5 2189 DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
19a60522
SS
2190 return 0;
2191 }
2192 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
9e39f7c5 2193 DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
19a60522
SS
2194 return 0;
2195 }
2196 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
9e39f7c5 2197 DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
19a60522
SS
2198 return 0;
2199 }
2200 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
9e39f7c5 2201 DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
19a60522
SS
2202 return 0;
2203 }
2204 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
9e39f7c5 2205 DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
19a60522
SS
2206 return 0;
2207 }
2208 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
9e39f7c5 2209 DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
19a60522
SS
2210 return 0;
2211 }
2212 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
9e39f7c5 2213 DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
19a60522 2214 return 0;
1da177e4
LT
2215 }
2216
19a60522
SS
2217 /*
2218 * In PCI 33 mode, the P_PLL is not used, and therefore,
2219 * the the P_PLL_LOCK bit in the adapter_status register will
2220 * not be asserted.
2221 */
2222 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
d44570e4
JP
2223 sp->device_type == XFRAME_II_DEVICE &&
2224 mode != PCI_MODE_PCI_33) {
9e39f7c5 2225 DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
19a60522
SS
2226 return 0;
2227 }
2228 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
d44570e4 2229 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
9e39f7c5 2230 DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
19a60522
SS
2231 return 0;
2232 }
2233 return 1;
1da177e4
LT
2234}
2235
2236/**
2237 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2238 * @sp: Pointer to device specifc structure
20346722 2239 * Description :
1da177e4
LT
2240 * New procedure to clear mac address reading problems on Alpha platforms
2241 *
2242 */
2243
d44570e4 2244static void fix_mac_address(struct s2io_nic *sp)
1da177e4 2245{
1ee6dd77 2246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2247 u64 val64;
2248 int i = 0;
2249
2250 while (fix_mac[i] != END_SIGN) {
2251 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2252 udelay(10);
1da177e4
LT
2253 val64 = readq(&bar0->gpio_control);
2254 }
2255}
2256
2257/**
20346722 2258 * start_nic - Turns the device on
1da177e4 2259 * @nic : device private variable.
20346722
K
2260 * Description:
2261 * This function actually turns the device on. Before this function is
2262 * called,all Registers are configured from their reset states
2263 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
2264 * calling this function, the device interrupts are cleared and the NIC is
2265 * literally switched on by writing into the adapter control register.
20346722 2266 * Return Value:
1da177e4
LT
2267 * SUCCESS on success and -1 on failure.
2268 */
2269
2270static int start_nic(struct s2io_nic *nic)
2271{
1ee6dd77 2272 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
2273 struct net_device *dev = nic->dev;
2274 register u64 val64 = 0;
20346722 2275 u16 subid, i;
ffb5df6c
JP
2276 struct config_param *config = &nic->config;
2277 struct mac_info *mac_control = &nic->mac_control;
1da177e4
LT
2278
2279 /* PRC Initialization and configuration */
2280 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
2281 struct ring_info *ring = &mac_control->rings[i];
2282
d44570e4 2283 writeq((u64)ring->rx_blocks[0].block_dma_addr,
1da177e4
LT
2284 &bar0->prc_rxd0_n[i]);
2285
2286 val64 = readq(&bar0->prc_ctrl_n[i]);
da6971d8
AR
2287 if (nic->rxd_mode == RXD_MODE_1)
2288 val64 |= PRC_CTRL_RC_ENABLED;
2289 else
2290 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
863c11a9
AR
2291 if (nic->device_type == XFRAME_II_DEVICE)
2292 val64 |= PRC_CTRL_GROUP_READS;
2293 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2294 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
1da177e4
LT
2295 writeq(val64, &bar0->prc_ctrl_n[i]);
2296 }
2297
da6971d8
AR
2298 if (nic->rxd_mode == RXD_MODE_3B) {
2299 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2300 val64 = readq(&bar0->rx_pa_cfg);
2301 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2302 writeq(val64, &bar0->rx_pa_cfg);
2303 }
1da177e4 2304
926930b2
SS
2305 if (vlan_tag_strip == 0) {
2306 val64 = readq(&bar0->rx_pa_cfg);
2307 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2308 writeq(val64, &bar0->rx_pa_cfg);
cd0fce03 2309 nic->vlan_strip_flag = 0;
926930b2
SS
2310 }
2311
20346722 2312 /*
1da177e4
LT
2313 * Enabling MC-RLDRAM. After enabling the device, we timeout
2314 * for around 100ms, which is approximately the time required
2315 * for the device to be ready for operation.
2316 */
2317 val64 = readq(&bar0->mc_rldram_mrs);
2318 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2319 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2320 val64 = readq(&bar0->mc_rldram_mrs);
2321
20346722 2322 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
2323
2324 /* Enabling ECC Protection. */
2325 val64 = readq(&bar0->adapter_control);
2326 val64 &= ~ADAPTER_ECC_EN;
2327 writeq(val64, &bar0->adapter_control);
2328
20346722
K
2329 /*
2330 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
2331 * it.
2332 */
2333 val64 = readq(&bar0->adapter_status);
19a60522 2334 if (!verify_xena_quiescence(nic)) {
9e39f7c5
JP
2335 DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2336 "Adapter status reads: 0x%llx\n",
2337 dev->name, (unsigned long long)val64);
1da177e4
LT
2338 return FAILURE;
2339 }
2340
20346722 2341 /*
1da177e4 2342 * With some switches, link might be already up at this point.
20346722
K
2343 * Because of this weird behavior, when we enable laser,
2344 * we may not get link. We need to handle this. We cannot
2345 * figure out which switch is misbehaving. So we are forced to
2346 * make a global change.
1da177e4
LT
2347 */
2348
2349 /* Enabling Laser. */
2350 val64 = readq(&bar0->adapter_control);
2351 val64 |= ADAPTER_EOI_TX_ON;
2352 writeq(val64, &bar0->adapter_control);
2353
c92ca04b
AR
2354 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2355 /*
2356 * Dont see link state interrupts initally on some switches,
2357 * so directly scheduling the link state task here.
2358 */
2359 schedule_work(&nic->set_link_task);
2360 }
1da177e4
LT
2361 /* SXE-002: Initialize link and activity LED */
2362 subid = nic->pdev->subsystem_device;
541ae68f
K
2363 if (((subid & 0xFF) >= 0x07) &&
2364 (nic->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
2365 val64 = readq(&bar0->gpio_control);
2366 val64 |= 0x0000800000000000ULL;
2367 writeq(val64, &bar0->gpio_control);
2368 val64 = 0x0411040400000000ULL;
509a2671 2369 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
2370 }
2371
1da177e4
LT
2372 return SUCCESS;
2373}
fed5eccd
AR
2374/**
2375 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2376 */
d44570e4
JP
2377static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2378 struct TxD *txdlp, int get_off)
fed5eccd 2379{
1ee6dd77 2380 struct s2io_nic *nic = fifo_data->nic;
fed5eccd 2381 struct sk_buff *skb;
1ee6dd77 2382 struct TxD *txds;
fed5eccd
AR
2383 u16 j, frg_cnt;
2384
2385 txds = txdlp;
2fda096d 2386 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
d44570e4
JP
2387 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2388 sizeof(u64), PCI_DMA_TODEVICE);
fed5eccd
AR
2389 txds++;
2390 }
2391
d44570e4 2392 skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
fed5eccd 2393 if (!skb) {
1ee6dd77 2394 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2395 return NULL;
2396 }
d44570e4 2397 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
e743d313 2398 skb_headlen(skb), PCI_DMA_TODEVICE);
fed5eccd
AR
2399 frg_cnt = skb_shinfo(skb)->nr_frags;
2400 if (frg_cnt) {
2401 txds++;
2402 for (j = 0; j < frg_cnt; j++, txds++) {
2403 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2404 if (!txds->Buffer_Pointer)
2405 break;
d44570e4
JP
2406 pci_unmap_page(nic->pdev,
2407 (dma_addr_t)txds->Buffer_Pointer,
fed5eccd
AR
2408 frag->size, PCI_DMA_TODEVICE);
2409 }
2410 }
d44570e4
JP
2411 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2412 return skb;
fed5eccd 2413}
1da177e4 2414
20346722
K
2415/**
2416 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2417 * @nic : device private variable.
20346722 2418 * Description:
1da177e4 2419 * Free all queued Tx buffers.
20346722 2420 * Return Value: void
d44570e4 2421 */
1da177e4
LT
2422
2423static void free_tx_buffers(struct s2io_nic *nic)
2424{
2425 struct net_device *dev = nic->dev;
2426 struct sk_buff *skb;
1ee6dd77 2427 struct TxD *txdp;
1da177e4 2428 int i, j;
fed5eccd 2429 int cnt = 0;
ffb5df6c
JP
2430 struct config_param *config = &nic->config;
2431 struct mac_info *mac_control = &nic->mac_control;
2432 struct stat_block *stats = mac_control->stats_info;
2433 struct swStat *swstats = &stats->sw_stat;
1da177e4
LT
2434
2435 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
2436 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2437 struct fifo_info *fifo = &mac_control->fifos[i];
2fda096d 2438 unsigned long flags;
13d866a9
JP
2439
2440 spin_lock_irqsave(&fifo->tx_lock, flags);
2441 for (j = 0; j < tx_cfg->fifo_len; j++) {
2442 txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
fed5eccd
AR
2443 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2444 if (skb) {
ffb5df6c 2445 swstats->mem_freed += skb->truesize;
fed5eccd
AR
2446 dev_kfree_skb(skb);
2447 cnt++;
1da177e4 2448 }
1da177e4
LT
2449 }
2450 DBG_PRINT(INTR_DBG,
9e39f7c5 2451 "%s: forcibly freeing %d skbs on FIFO%d\n",
1da177e4 2452 dev->name, cnt, i);
13d866a9
JP
2453 fifo->tx_curr_get_info.offset = 0;
2454 fifo->tx_curr_put_info.offset = 0;
2455 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1da177e4
LT
2456 }
2457}
2458
20346722
K
2459/**
2460 * stop_nic - To stop the nic
1da177e4 2461 * @nic ; device private variable.
20346722
K
2462 * Description:
2463 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2464 * function does. This function is called to stop the device.
2465 * Return Value:
2466 * void.
2467 */
2468
2469static void stop_nic(struct s2io_nic *nic)
2470{
1ee6dd77 2471 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2472 register u64 val64 = 0;
5d3213cc 2473 u16 interruptible;
1da177e4
LT
2474
2475 /* Disable all interrupts */
9caab458 2476 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2477 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2478 interruptible |= TX_PIC_INTR;
1da177e4
LT
2479 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2480
5d3213cc
AR
2481 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2482 val64 = readq(&bar0->adapter_control);
2483 val64 &= ~(ADAPTER_CNTL_EN);
2484 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2485}
2486
20346722
K
2487/**
2488 * fill_rx_buffers - Allocates the Rx side skbs
0425b46a 2489 * @ring_info: per ring structure
3f78d885
SH
2490 * @from_card_up: If this is true, we will map the buffer to get
2491 * the dma address for buf0 and buf1 to give it to the card.
2492 * Else we will sync the already mapped buffer to give it to the card.
20346722 2493 * Description:
1da177e4
LT
2494 * The function allocates Rx side skbs and puts the physical
2495 * address of these buffers into the RxD buffer pointers, so that the NIC
2496 * can DMA the received frame into these locations.
2497 * The NIC supports 3 receive modes, viz
2498 * 1. single buffer,
2499 * 2. three buffer and
2500 * 3. Five buffer modes.
20346722
K
2501 * Each mode defines how many fragments the received frame will be split
2502 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
2503 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2504 * is split into 3 fragments. As of now only single buffer mode is
2505 * supported.
2506 * Return Value:
2507 * SUCCESS on success or an appropriate -ve value on failure.
2508 */
8d8bb39b 2509static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
d44570e4 2510 int from_card_up)
1da177e4 2511{
1da177e4 2512 struct sk_buff *skb;
1ee6dd77 2513 struct RxD_t *rxdp;
0425b46a 2514 int off, size, block_no, block_no1;
1da177e4 2515 u32 alloc_tab = 0;
20346722 2516 u32 alloc_cnt;
20346722 2517 u64 tmp;
1ee6dd77 2518 struct buffAdd *ba;
1ee6dd77 2519 struct RxD_t *first_rxdp = NULL;
363dc367 2520 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
0425b46a 2521 int rxd_index = 0;
6d517a27
VP
2522 struct RxD1 *rxdp1;
2523 struct RxD3 *rxdp3;
ffb5df6c 2524 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
1da177e4 2525
0425b46a 2526 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
1da177e4 2527
0425b46a 2528 block_no1 = ring->rx_curr_get_info.block_index;
1da177e4 2529 while (alloc_tab < alloc_cnt) {
0425b46a 2530 block_no = ring->rx_curr_put_info.block_index;
1da177e4 2531
0425b46a
SH
2532 off = ring->rx_curr_put_info.offset;
2533
2534 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2535
2536 rxd_index = off + 1;
2537 if (block_no)
2538 rxd_index += (block_no * ring->rxd_count);
da6971d8 2539
7d2e3cb7 2540 if ((block_no == block_no1) &&
d44570e4
JP
2541 (off == ring->rx_curr_get_info.offset) &&
2542 (rxdp->Host_Control)) {
9e39f7c5
JP
2543 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2544 ring->dev->name);
1da177e4
LT
2545 goto end;
2546 }
0425b46a
SH
2547 if (off && (off == ring->rxd_count)) {
2548 ring->rx_curr_put_info.block_index++;
2549 if (ring->rx_curr_put_info.block_index ==
d44570e4 2550 ring->block_count)
0425b46a
SH
2551 ring->rx_curr_put_info.block_index = 0;
2552 block_no = ring->rx_curr_put_info.block_index;
2553 off = 0;
2554 ring->rx_curr_put_info.offset = off;
2555 rxdp = ring->rx_blocks[block_no].block_virt_addr;
1da177e4 2556 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
0425b46a
SH
2557 ring->dev->name, rxdp);
2558
1da177e4 2559 }
c9fcbf47 2560
da6971d8 2561 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
d44570e4
JP
2562 ((ring->rxd_mode == RXD_MODE_3B) &&
2563 (rxdp->Control_2 & s2BIT(0)))) {
0425b46a 2564 ring->rx_curr_put_info.offset = off;
1da177e4
LT
2565 goto end;
2566 }
da6971d8 2567 /* calculate size of skb based on ring mode */
d44570e4
JP
2568 size = ring->mtu +
2569 HEADER_ETHERNET_II_802_3_SIZE +
2570 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
0425b46a 2571 if (ring->rxd_mode == RXD_MODE_1)
da6971d8 2572 size += NET_IP_ALIGN;
da6971d8 2573 else
0425b46a 2574 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
1da177e4 2575
da6971d8
AR
2576 /* allocate skb */
2577 skb = dev_alloc_skb(size);
d44570e4 2578 if (!skb) {
9e39f7c5
JP
2579 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2580 ring->dev->name);
303bcb4b
K
2581 if (first_rxdp) {
2582 wmb();
2583 first_rxdp->Control_1 |= RXD_OWN_XENA;
2584 }
ffb5df6c 2585 swstats->mem_alloc_fail_cnt++;
7d2e3cb7 2586
da6971d8
AR
2587 return -ENOMEM ;
2588 }
ffb5df6c 2589 swstats->mem_allocated += skb->truesize;
0425b46a
SH
2590
2591 if (ring->rxd_mode == RXD_MODE_1) {
da6971d8 2592 /* 1 buffer mode - normal operation mode */
d44570e4 2593 rxdp1 = (struct RxD1 *)rxdp;
1ee6dd77 2594 memset(rxdp, 0, sizeof(struct RxD1));
da6971d8 2595 skb_reserve(skb, NET_IP_ALIGN);
d44570e4
JP
2596 rxdp1->Buffer0_ptr =
2597 pci_map_single(ring->pdev, skb->data,
2598 size - NET_IP_ALIGN,
2599 PCI_DMA_FROMDEVICE);
8d8bb39b 2600 if (pci_dma_mapping_error(nic->pdev,
d44570e4 2601 rxdp1->Buffer0_ptr))
491abf25
VP
2602 goto pci_map_failed;
2603
8a4bdbaa 2604 rxdp->Control_2 =
491976b2 2605 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
d44570e4 2606 rxdp->Host_Control = (unsigned long)skb;
0425b46a 2607 } else if (ring->rxd_mode == RXD_MODE_3B) {
da6971d8 2608 /*
6d517a27
VP
2609 * 2 buffer mode -
2610 * 2 buffer mode provides 128
da6971d8 2611 * byte aligned receive buffers.
da6971d8
AR
2612 */
2613
d44570e4 2614 rxdp3 = (struct RxD3 *)rxdp;
491976b2 2615 /* save buffer pointers to avoid frequent dma mapping */
6d517a27
VP
2616 Buffer0_ptr = rxdp3->Buffer0_ptr;
2617 Buffer1_ptr = rxdp3->Buffer1_ptr;
1ee6dd77 2618 memset(rxdp, 0, sizeof(struct RxD3));
363dc367 2619 /* restore the buffer pointers for dma sync*/
6d517a27
VP
2620 rxdp3->Buffer0_ptr = Buffer0_ptr;
2621 rxdp3->Buffer1_ptr = Buffer1_ptr;
363dc367 2622
0425b46a 2623 ba = &ring->ba[block_no][off];
da6971d8 2624 skb_reserve(skb, BUF0_LEN);
d44570e4 2625 tmp = (u64)(unsigned long)skb->data;
da6971d8
AR
2626 tmp += ALIGN_SIZE;
2627 tmp &= ~ALIGN_SIZE;
2628 skb->data = (void *) (unsigned long)tmp;
27a884dc 2629 skb_reset_tail_pointer(skb);
da6971d8 2630
3f78d885 2631 if (from_card_up) {
6d517a27 2632 rxdp3->Buffer0_ptr =
d44570e4
JP
2633 pci_map_single(ring->pdev, ba->ba_0,
2634 BUF0_LEN,
2635 PCI_DMA_FROMDEVICE);
2636 if (pci_dma_mapping_error(nic->pdev,
2637 rxdp3->Buffer0_ptr))
3f78d885
SH
2638 goto pci_map_failed;
2639 } else
0425b46a 2640 pci_dma_sync_single_for_device(ring->pdev,
d44570e4
JP
2641 (dma_addr_t)rxdp3->Buffer0_ptr,
2642 BUF0_LEN,
2643 PCI_DMA_FROMDEVICE);
491abf25 2644
da6971d8 2645 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
0425b46a 2646 if (ring->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
2647 /* Two buffer mode */
2648
2649 /*
6aa20a22 2650 * Buffer2 will have L3/L4 header plus
da6971d8
AR
2651 * L4 payload
2652 */
d44570e4
JP
2653 rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2654 skb->data,
2655 ring->mtu + 4,
2656 PCI_DMA_FROMDEVICE);
da6971d8 2657
8d8bb39b 2658 if (pci_dma_mapping_error(nic->pdev,
d44570e4 2659 rxdp3->Buffer2_ptr))
491abf25
VP
2660 goto pci_map_failed;
2661
3f78d885 2662 if (from_card_up) {
0425b46a
SH
2663 rxdp3->Buffer1_ptr =
2664 pci_map_single(ring->pdev,
d44570e4
JP
2665 ba->ba_1,
2666 BUF1_LEN,
2667 PCI_DMA_FROMDEVICE);
0425b46a 2668
8d8bb39b 2669 if (pci_dma_mapping_error(nic->pdev,
d44570e4
JP
2670 rxdp3->Buffer1_ptr)) {
2671 pci_unmap_single(ring->pdev,
2672 (dma_addr_t)(unsigned long)
2673 skb->data,
2674 ring->mtu + 4,
2675 PCI_DMA_FROMDEVICE);
3f78d885
SH
2676 goto pci_map_failed;
2677 }
75c30b13 2678 }
da6971d8
AR
2679 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2680 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
d44570e4 2681 (ring->mtu + 4);
da6971d8 2682 }
b7b5a128 2683 rxdp->Control_2 |= s2BIT(0);
0425b46a 2684 rxdp->Host_Control = (unsigned long) (skb);
1da177e4 2685 }
303bcb4b
K
2686 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2687 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2688 off++;
0425b46a 2689 if (off == (ring->rxd_count + 1))
da6971d8 2690 off = 0;
0425b46a 2691 ring->rx_curr_put_info.offset = off;
20346722 2692
da6971d8 2693 rxdp->Control_2 |= SET_RXD_MARKER;
303bcb4b
K
2694 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2695 if (first_rxdp) {
2696 wmb();
2697 first_rxdp->Control_1 |= RXD_OWN_XENA;
2698 }
2699 first_rxdp = rxdp;
2700 }
0425b46a 2701 ring->rx_bufs_left += 1;
1da177e4
LT
2702 alloc_tab++;
2703 }
2704
d44570e4 2705end:
303bcb4b
K
2706 /* Transfer ownership of first descriptor to adapter just before
2707 * exiting. Before that, use memory barrier so that ownership
2708 * and other fields are seen by adapter correctly.
2709 */
2710 if (first_rxdp) {
2711 wmb();
2712 first_rxdp->Control_1 |= RXD_OWN_XENA;
2713 }
2714
1da177e4 2715 return SUCCESS;
d44570e4 2716
491abf25 2717pci_map_failed:
ffb5df6c
JP
2718 swstats->pci_map_fail_cnt++;
2719 swstats->mem_freed += skb->truesize;
491abf25
VP
2720 dev_kfree_skb_irq(skb);
2721 return -ENOMEM;
1da177e4
LT
2722}
2723
da6971d8
AR
/**
 * free_rxd_blk - free all skbs attached to one Rx descriptor block
 * @sp: device private structure.
 * @ring_no: index of the Rx ring the block belongs to.
 * @blk: index of the RxD block within that ring.
 *
 * Walks every descriptor in the block; for each one that still carries an
 * skb (stashed in Host_Control at fill time), undoes the streaming DMA
 * mappings created when the buffer was posted (one mapping in 1-buffer
 * mode, three in 2-buffer mode), clears the descriptor, frees the skb and
 * decrements the ring's outstanding-buffer count.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct buffAdd *ba;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		/* Host_Control holds the skb pointer stored at fill time */
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			/* single buffer spans MAC header + full payload */
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3 *)rxdp;
			/* NOTE(review): 'ba' is computed but never used
			 * afterwards in this branch. */
			ba = &mac_control->rings[ring_no].ba[blk][j];
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2774
/**
 * free_rx_buffers - Frees all Rx buffers
 * @sp: device private variable.
 * Description:
 * This function will free all Rx buffers allocated by host: it iterates
 * over every configured Rx ring, frees each descriptor block via
 * free_rxd_blk() and resets the ring's get/put bookkeeping to zero.
 * Return Value:
 * NONE.
 */

static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, blk = 0, buf_cnt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp, i, blk);

		/* reset both producer and consumer positions for the ring */
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_bufs_left = 0;
		/* NOTE(review): buf_cnt is never incremented, so this debug
		 * message always reports 0 buffers freed. */
		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}
2806
8d8bb39b 2807static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
f61e0a35 2808{
8d8bb39b 2809 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
9e39f7c5
JP
2810 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2811 ring->dev->name);
f61e0a35
SH
2812 }
2813 return 0;
2814}
2815
1da177e4
LT
/**
 * s2io_poll_msix - Rx NAPI poll handler (per-ring, MSI-X mode)
 * @napi : pointer to the napi structure embedded in the ring.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * Number of packets processed; when under budget, completes NAPI and
 * re-enables this ring's MSI-X Rx vector.
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	/* refill the descriptors consumed above */
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re Enable MSI-Rx Vector: byte for this ring inside
		 * xmsi_mask_reg (ring 0 uses a different unmask value). */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		/* read back — presumably to flush the posted write;
		 * NOTE(review): confirm against hardware docs. */
		val8 = readb(addr);
	}
	return pkts_processed;
}
d44570e4 2857
f61e0a35
SH
2858static int s2io_poll_inta(struct napi_struct *napi, int budget)
2859{
2860 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
f61e0a35
SH
2861 int pkts_processed = 0;
2862 int ring_pkts_processed, i;
2863 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2864 int budget_org = budget;
ffb5df6c
JP
2865 struct config_param *config = &nic->config;
2866 struct mac_info *mac_control = &nic->mac_control;
1da177e4 2867
f61e0a35
SH
2868 if (unlikely(!is_s2io_card_up(nic)))
2869 return 0;
1da177e4 2870
1da177e4 2871 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9 2872 struct ring_info *ring = &mac_control->rings[i];
f61e0a35 2873 ring_pkts_processed = rx_intr_handler(ring, budget);
8d8bb39b 2874 s2io_chk_rx_buffers(nic, ring);
f61e0a35
SH
2875 pkts_processed += ring_pkts_processed;
2876 budget -= ring_pkts_processed;
2877 if (budget <= 0)
1da177e4 2878 break;
1da177e4 2879 }
f61e0a35 2880 if (pkts_processed < budget_org) {
288379f0 2881 napi_complete(napi);
f61e0a35
SH
2882 /* Re enable the Rx interrupts for the ring */
2883 writeq(0, &bar0->rx_traffic_mask);
2884 readl(&bar0->rx_traffic_mask);
2885 }
2886 return pkts_processed;
1da177e4 2887}
20346722 2888
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* device lost due to a PCI error — nothing to poll */
	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(dev->irq);

	/* acknowledge/clear all pending Rx and Tx traffic interrupts */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		rx_intr_handler(ring, 0);
	}

	/* replenish the Rx rings emptied above */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(dev->irq);
}
#endif
2943
/**
 * rx_intr_handler - Rx interrupt handler
 * @ring_data: per ring structure.
 * @budget: budget for napi processing.
 * Description:
 * If the interrupt is because of a received frame or if the
 * receive ring contains fresh as yet un-processed frames, this function is
 * called. It picks out the RxD at which place the last Rx processing had
 * stopped and sends the skb to the OSM's Rx handler and then increments
 * the offset.
 * Return Value:
 * No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* process descriptors the hardware has handed back to the host */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3 *)rxdp;
			/* Buffer0 (header) stays mapped: sync it for CPU;
			 * Buffer2 (payload) is unmapped outright. */
			pci_dma_sync_single_for_cpu(ring_data->pdev,
						    (dma_addr_t)rxdp3->Buffer0_ptr,
						    BUF0_LEN,
						    PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		/* wrap to the next descriptor block when this one is done */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
K
3050
3051/**
1da177e4
LT
3052 * tx_intr_handler - Transmit interrupt handler
3053 * @nic : device private variable
20346722
K
3054 * Description:
3055 * If an interrupt was raised to indicate DMA complete of the
3056 * Tx packet, this function is called. It identifies the last TxD
3057 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
3058 * DMA'ed into the NICs internal memory.
3059 * Return Value:
3060 * NONE
3061 */
3062
1ee6dd77 3063static void tx_intr_handler(struct fifo_info *fifo_data)
1da177e4 3064{
1ee6dd77 3065 struct s2io_nic *nic = fifo_data->nic;
1ee6dd77 3066 struct tx_curr_get_info get_info, put_info;
3a3d5756 3067 struct sk_buff *skb = NULL;
1ee6dd77 3068 struct TxD *txdlp;
3a3d5756 3069 int pkt_cnt = 0;
2fda096d 3070 unsigned long flags = 0;
f9046eb3 3071 u8 err_mask;
ffb5df6c
JP
3072 struct stat_block *stats = nic->mac_control.stats_info;
3073 struct swStat *swstats = &stats->sw_stat;
1da177e4 3074
2fda096d 3075 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
d44570e4 3076 return;
2fda096d 3077
20346722 3078 get_info = fifo_data->tx_curr_get_info;
1ee6dd77 3079 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
d44570e4
JP
3080 txdlp = (struct TxD *)
3081 fifo_data->list_info[get_info.offset].list_virt_addr;
20346722
K
3082 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3083 (get_info.offset != put_info.offset) &&
3084 (txdlp->Host_Control)) {
3085 /* Check for TxD errors */
3086 if (txdlp->Control_1 & TXD_T_CODE) {
3087 unsigned long long err;
3088 err = txdlp->Control_1 & TXD_T_CODE;
bd1034f0 3089 if (err & 0x1) {
ffb5df6c 3090 swstats->parity_err_cnt++;
bd1034f0 3091 }
491976b2
SH
3092
3093 /* update t_code statistics */
f9046eb3 3094 err_mask = err >> 48;
d44570e4
JP
3095 switch (err_mask) {
3096 case 2:
ffb5df6c 3097 swstats->tx_buf_abort_cnt++;
491976b2
SH
3098 break;
3099
d44570e4 3100 case 3:
ffb5df6c 3101 swstats->tx_desc_abort_cnt++;
491976b2
SH
3102 break;
3103
d44570e4 3104 case 7:
ffb5df6c 3105 swstats->tx_parity_err_cnt++;
491976b2
SH
3106 break;
3107
d44570e4 3108 case 10:
ffb5df6c 3109 swstats->tx_link_loss_cnt++;
491976b2
SH
3110 break;
3111
d44570e4 3112 case 15:
ffb5df6c 3113 swstats->tx_list_proc_err_cnt++;
491976b2 3114 break;
d44570e4 3115 }
20346722 3116 }
1da177e4 3117
fed5eccd 3118 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
20346722 3119 if (skb == NULL) {
2fda096d 3120 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
9e39f7c5
JP
3121 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3122 __func__);
20346722
K
3123 return;
3124 }
3a3d5756 3125 pkt_cnt++;
20346722 3126
20346722 3127 /* Updating the statistics block */
ffb5df6c 3128 swstats->mem_freed += skb->truesize;
20346722
K
3129 dev_kfree_skb_irq(skb);
3130
3131 get_info.offset++;
863c11a9
AR
3132 if (get_info.offset == get_info.fifo_len + 1)
3133 get_info.offset = 0;
d44570e4
JP
3134 txdlp = (struct TxD *)
3135 fifo_data->list_info[get_info.offset].list_virt_addr;
3136 fifo_data->tx_curr_get_info.offset = get_info.offset;
1da177e4
LT
3137 }
3138
3a3d5756 3139 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
2fda096d
SR
3140
3141 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
1da177e4
LT
3142}
3143
bd1034f0
AR
/**
 * s2io_mdio_write - Function to write in to MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @value : data value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Issues three transactions through the mdio_control register —
 * address, write-data, and a trailing read — each started twice
 * (with START_TRANS) and followed by a 100us delay.
 * Return value: NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_MDIO_DATA(value) |
		MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* trailing read transaction after the write */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
3190
/**
 * s2io_mdio_read - Function to read from the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Return value: 16-bit register value (bits 31:16 of mdio_control).
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
			 | MDIO_MMD_DEV_ADDR(mmd_type)
			 | MDIO_MMS_PRT_ADDR(0x0));
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs: data lives in bits 31:16 */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
d44570e4 3232
bd1034f0
AR
3233/**
3234 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
fbfecd37 3235 * @counter : counter value to be updated
bd1034f0
AR
3236 * @flag : flag to indicate the status
3237 * @type : counter type
3238 * Description:
3239 * This function is to check the status of the xpak counters value
3240 * NONE
3241 */
3242
d44570e4
JP
3243static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3244 u16 flag, u16 type)
bd1034f0
AR
3245{
3246 u64 mask = 0x3;
3247 u64 val64;
3248 int i;
d44570e4 3249 for (i = 0; i < index; i++)
bd1034f0
AR
3250 mask = mask << 0x2;
3251
d44570e4 3252 if (flag > 0) {
bd1034f0
AR
3253 *counter = *counter + 1;
3254 val64 = *regs_stat & mask;
3255 val64 = val64 >> (index * 0x2);
3256 val64 = val64 + 1;
d44570e4
JP
3257 if (val64 == 3) {
3258 switch (type) {
bd1034f0 3259 case 1:
9e39f7c5
JP
3260 DBG_PRINT(ERR_DBG,
3261 "Take Xframe NIC out of service.\n");
3262 DBG_PRINT(ERR_DBG,
3263"Excessive temperatures may result in premature transceiver failure.\n");
d44570e4 3264 break;
bd1034f0 3265 case 2:
9e39f7c5
JP
3266 DBG_PRINT(ERR_DBG,
3267 "Take Xframe NIC out of service.\n");
3268 DBG_PRINT(ERR_DBG,
3269"Excessive bias currents may indicate imminent laser diode failure.\n");
d44570e4 3270 break;
bd1034f0 3271 case 3:
9e39f7c5
JP
3272 DBG_PRINT(ERR_DBG,
3273 "Take Xframe NIC out of service.\n");
3274 DBG_PRINT(ERR_DBG,
3275"Excessive laser output power may saturate far-end receiver.\n");
d44570e4 3276 break;
bd1034f0 3277 default:
d44570e4
JP
3278 DBG_PRINT(ERR_DBG,
3279 "Incorrect XPAK Alarm type\n");
bd1034f0
AR
3280 }
3281 val64 = 0x0;
3282 }
3283 val64 = val64 << (index * 0x2);
3284 *regs_stat = (*regs_stat & (~mask)) | (val64);
3285
3286 } else {
3287 *regs_stat = *regs_stat & (~mask);
3288 }
3289}
3290
/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value.
 * Verifies MDIO access, then reads the XPAK alarm (0xA070) and warning
 * (0xA074) flag registers and folds each flag into the driver's
 * xpakStat counters.
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG,
			  "ERR: MDIO slave access failed - Returned %llx\n",
			  (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* high-temperature alarm: tracked with 3-strike persistence */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
			      &xstats->xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		xstats->alarm_transceiver_temp_low++;

	/* high laser-bias-current alarm */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
			      &xstats->xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		xstats->alarm_laser_bias_current_low++;

	/* high laser-output-power alarm */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
			      &xstats->xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	if (CHECKBIT(val64, 0x7))
		xstats->warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		xstats->warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		xstats->warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		xstats->warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		xstats->warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		xstats->warn_laser_output_power_low++;
}
3389
/**
 * wait_for_cmd_complete - waits for a command to complete.
 * @addr: register to poll.
 * @busy_bit: bit(s) within that register to test.
 * @bit_state: S2IO_BIT_RESET to wait for the bit(s) to clear,
 *	S2IO_BIT_SET to wait for them to become set.
 * Description: Function that waits for a command to Write into RMAC
 * ADDR DATA registers to be completed and returns either success or
 * error depending on whether the command was complete or not.
 * Polls up to 20 times; the first 10 polls wait 1ms each, the rest 50ms.
 * Return value:
 * SUCCESS on success and FAILURE on failure (or invalid @bit_state).
 */

static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
				 int bit_state)
{
	int ret = FAILURE, cnt = 0, delay = 1;
	u64 val64;

	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
		return FAILURE;

	do {
		val64 = readq(addr);
		if (bit_state == S2IO_BIT_RESET) {
			if (!(val64 & busy_bit)) {
				ret = SUCCESS;
				break;
			}
		} else {
			if (val64 & busy_bit) {
				ret = SUCCESS;
				break;
			}
		}

		/* mdelay (busy-wait) in interrupt context, msleep otherwise */
		if (in_interrupt())
			mdelay(delay);
		else
			msleep(delay);

		/* back off to a longer delay after the first 10 polls */
		if (++cnt >= 10)
			delay = 50;
	} while (cnt < 20);
	return ret;
}
19a60522
SS
3434/*
3435 * check_pci_device_id - Checks if the device id is supported
3436 * @id : device id
3437 * Description: Function to check if the pci device id is supported by driver.
3438 * Return value: Actual device id if supported else PCI_ANY_ID
3439 */
3440static u16 check_pci_device_id(u16 id)
3441{
3442 switch (id) {
3443 case PCI_DEVICE_ID_HERC_WIN:
3444 case PCI_DEVICE_ID_HERC_UNI:
3445 return XFRAME_II_DEVICE;
3446 case PCI_DEVICE_ID_S2IO_UNI:
3447 case PCI_DEVICE_ID_S2IO_WIN:
3448 return XFRAME_I_DEVICE;
3449 default:
3450 return PCI_ANY_ID;
3451 }
3452}
1da177e4 3453
20346722
K
/**
 * s2io_reset - Resets the card.
 * @sp : private member of the device structure.
 * Description: Function to Reset the card. This function then also
 * restores the previously saved PCI configuration space registers as
 * the card reset also resets the configuration space.  Device statistics
 * are cleared except for a handful of software counters (link up/down,
 * reset/memory/watchdog counts) which are saved and restored across the
 * reset.
 * Return value:
 * void.
 */

static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after reset */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		/* re-read the device id to see if config space is back */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3576
3577/**
20346722
K
3578 * s2io_set_swapper - to set the swapper controle on the card
3579 * @sp : private member of the device structure,
1da177e4 3580 * pointer to the s2io_nic structure.
20346722 3581 * Description: Function to set the swapper control on the card
1da177e4
LT
3582 * correctly depending on the 'endianness' of the system.
3583 * Return value:
3584 * SUCCESS on success and FAILURE on failure.
3585 */
3586
d44570e4 3587static int s2io_set_swapper(struct s2io_nic *sp)
1da177e4
LT
3588{
3589 struct net_device *dev = sp->dev;
1ee6dd77 3590 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
3591 u64 val64, valt, valr;
3592
20346722 3593 /*
1da177e4
LT
3594 * Set proper endian settings and verify the same by reading
3595 * the PIF Feed-back register.
3596 */
3597
3598 val64 = readq(&bar0->pif_rd_swapper_fb);
3599 if (val64 != 0x0123456789ABCDEFULL) {
3600 int i = 0;
3601 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3602 0x8100008181000081ULL, /* FE=1, SE=0 */
3603 0x4200004242000042ULL, /* FE=0, SE=1 */
3604 0}; /* FE=0, SE=0 */
3605
d44570e4 3606 while (i < 4) {
1da177e4
LT
3607 writeq(value[i], &bar0->swapper_ctrl);
3608 val64 = readq(&bar0->pif_rd_swapper_fb);
3609 if (val64 == 0x0123456789ABCDEFULL)
3610 break;
3611 i++;
3612 }
3613 if (i == 4) {
9e39f7c5
JP
3614 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3615 "feedback read %llx\n",
3616 dev->name, (unsigned long long)val64);
1da177e4
LT
3617 return FAILURE;
3618 }
3619 valr = value[i];
3620 } else {
3621 valr = readq(&bar0->swapper_ctrl);
3622 }
3623
3624 valt = 0x0123456789ABCDEFULL;
3625 writeq(valt, &bar0->xmsi_address);
3626 val64 = readq(&bar0->xmsi_address);
3627
d44570e4 3628 if (val64 != valt) {
1da177e4
LT
3629 int i = 0;
3630 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3631 0x0081810000818100ULL, /* FE=1, SE=0 */
3632 0x0042420000424200ULL, /* FE=0, SE=1 */
3633 0}; /* FE=0, SE=0 */
3634
d44570e4 3635 while (i < 4) {
1da177e4
LT
3636 writeq((value[i] | valr), &bar0->swapper_ctrl);
3637 writeq(valt, &bar0->xmsi_address);
3638 val64 = readq(&bar0->xmsi_address);
d44570e4 3639 if (val64 == valt)
1da177e4
LT
3640 break;
3641 i++;
3642 }
d44570e4 3643 if (i == 4) {
20346722 3644 unsigned long long x = val64;
9e39f7c5
JP
3645 DBG_PRINT(ERR_DBG,
3646 "Write failed, Xmsi_addr reads:0x%llx\n", x);
1da177e4
LT
3647 return FAILURE;
3648 }
3649 }
3650 val64 = readq(&bar0->swapper_ctrl);
3651 val64 &= 0xFFFF000000000000ULL;
3652
d44570e4 3653#ifdef __BIG_ENDIAN
20346722
K
3654 /*
3655 * The device by default set to a big endian format, so a
1da177e4
LT
3656 * big endian driver need not set anything.
3657 */
3658 val64 |= (SWAPPER_CTRL_TXP_FE |
d44570e4
JP
3659 SWAPPER_CTRL_TXP_SE |
3660 SWAPPER_CTRL_TXD_R_FE |
3661 SWAPPER_CTRL_TXD_W_FE |
3662 SWAPPER_CTRL_TXF_R_FE |
3663 SWAPPER_CTRL_RXD_R_FE |
3664 SWAPPER_CTRL_RXD_W_FE |
3665 SWAPPER_CTRL_RXF_W_FE |
3666 SWAPPER_CTRL_XMSI_FE |
3667 SWAPPER_CTRL_STATS_FE |
3668 SWAPPER_CTRL_STATS_SE);
eaae7f72 3669 if (sp->config.intr_type == INTA)
cc6e7c44 3670 val64 |= SWAPPER_CTRL_XMSI_SE;
1da177e4
LT
3671 writeq(val64, &bar0->swapper_ctrl);
3672#else
20346722 3673 /*
1da177e4 3674 * Initially we enable all bits to make it accessible by the
20346722 3675 * driver, then we selectively enable only those bits that
1da177e4
LT
3676 * we want to set.
3677 */
3678 val64 |= (SWAPPER_CTRL_TXP_FE |
d44570e4
JP
3679 SWAPPER_CTRL_TXP_SE |
3680 SWAPPER_CTRL_TXD_R_FE |
3681 SWAPPER_CTRL_TXD_R_SE |
3682 SWAPPER_CTRL_TXD_W_FE |
3683 SWAPPER_CTRL_TXD_W_SE |
3684 SWAPPER_CTRL_TXF_R_FE |
3685 SWAPPER_CTRL_RXD_R_FE |
3686 SWAPPER_CTRL_RXD_R_SE |
3687 SWAPPER_CTRL_RXD_W_FE |
3688 SWAPPER_CTRL_RXD_W_SE |
3689 SWAPPER_CTRL_RXF_W_FE |
3690 SWAPPER_CTRL_XMSI_FE |
3691 SWAPPER_CTRL_STATS_FE |
3692 SWAPPER_CTRL_STATS_SE);
eaae7f72 3693 if (sp->config.intr_type == INTA)
cc6e7c44 3694 val64 |= SWAPPER_CTRL_XMSI_SE;
1da177e4
LT
3695 writeq(val64, &bar0->swapper_ctrl);
3696#endif
3697 val64 = readq(&bar0->swapper_ctrl);
3698
20346722
K
3699 /*
3700 * Verifying if endian settings are accurate by reading a
1da177e4
LT
3701 * feedback register.
3702 */
3703 val64 = readq(&bar0->pif_rd_swapper_fb);
3704 if (val64 != 0x0123456789ABCDEFULL) {
3705 /* Endian settings are incorrect, calls for another dekko. */
9e39f7c5
JP
3706 DBG_PRINT(ERR_DBG,
3707 "%s: Endian settings are wrong, feedback read %llx\n",
3708 dev->name, (unsigned long long)val64);
1da177e4
LT
3709 return FAILURE;
3710 }
3711
3712 return SUCCESS;
3713}
3714
1ee6dd77 3715static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3716{
1ee6dd77 3717 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3718 u64 val64;
3719 int ret = 0, cnt = 0;
3720
3721 do {
3722 val64 = readq(&bar0->xmsi_access);
b7b5a128 3723 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3724 break;
3725 mdelay(1);
3726 cnt++;
d44570e4 3727 } while (cnt < 5);
cc6e7c44
RA
3728 if (cnt == 5) {
3729 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3730 ret = 1;
3731 }
3732
3733 return ret;
3734}
3735
1ee6dd77 3736static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3737{
1ee6dd77 3738 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3739 u64 val64;
f61e0a35
SH
3740 int i, msix_index;
3741
f61e0a35
SH
3742 if (nic->device_type == XFRAME_I_DEVICE)
3743 return;
cc6e7c44 3744
d44570e4
JP
3745 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3746 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
cc6e7c44
RA
3747 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3748 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
f61e0a35 3749 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3750 writeq(val64, &bar0->xmsi_access);
f61e0a35 3751 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3752 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3753 __func__, msix_index);
cc6e7c44
RA
3754 continue;
3755 }
3756 }
3757}
3758
1ee6dd77 3759static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3760{
1ee6dd77 3761 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44 3762 u64 val64, addr, data;
f61e0a35
SH
3763 int i, msix_index;
3764
3765 if (nic->device_type == XFRAME_I_DEVICE)
3766 return;
cc6e7c44
RA
3767
3768 /* Store and display */
d44570e4
JP
3769 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3770 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
f61e0a35 3771 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
cc6e7c44 3772 writeq(val64, &bar0->xmsi_access);
f61e0a35 3773 if (wait_for_msix_trans(nic, msix_index)) {
9e39f7c5
JP
3774 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3775 __func__, msix_index);
cc6e7c44
RA
3776 continue;
3777 }
3778 addr = readq(&bar0->xmsi_address);
3779 data = readq(&bar0->xmsi_data);
3780 if (addr && data) {
3781 nic->msix_info[i].addr = addr;
3782 nic->msix_info[i].data = data;
3783 }
3784 }
3785}
3786
1ee6dd77 3787static int s2io_enable_msi_x(struct s2io_nic *nic)
cc6e7c44 3788{
1ee6dd77 3789 struct XENA_dev_config __iomem *bar0 = nic->bar0;
ac731ab6 3790 u64 rx_mat;
cc6e7c44
RA
3791 u16 msi_control; /* Temp variable */
3792 int ret, i, j, msix_indx = 1;
4f870320 3793 int size;
ffb5df6c
JP
3794 struct stat_block *stats = nic->mac_control.stats_info;
3795 struct swStat *swstats = &stats->sw_stat;
cc6e7c44 3796
4f870320 3797 size = nic->num_entries * sizeof(struct msix_entry);
44364a03 3798 nic->entries = kzalloc(size, GFP_KERNEL);
bd684e43 3799 if (!nic->entries) {
d44570e4
JP
3800 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3801 __func__);
ffb5df6c 3802 swstats->mem_alloc_fail_cnt++;
cc6e7c44
RA
3803 return -ENOMEM;
3804 }
ffb5df6c 3805 swstats->mem_allocated += size;
f61e0a35 3806
4f870320 3807 size = nic->num_entries * sizeof(struct s2io_msix_entry);
44364a03 3808 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
bd684e43 3809 if (!nic->s2io_entries) {
8a4bdbaa 3810 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
d44570e4 3811 __func__);
ffb5df6c 3812 swstats->mem_alloc_fail_cnt++;
cc6e7c44 3813 kfree(nic->entries);
ffb5df6c 3814 swstats->mem_freed
f61e0a35 3815 += (nic->num_entries * sizeof(struct msix_entry));
cc6e7c44
RA
3816 return -ENOMEM;
3817 }
ffb5df6c 3818 swstats->mem_allocated += size;
cc6e7c44 3819
ac731ab6
SH
3820 nic->entries[0].entry = 0;
3821 nic->s2io_entries[0].entry = 0;
3822 nic->s2io_entries[0].in_use = MSIX_FLG;
3823 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3824 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3825
f61e0a35
SH
3826 for (i = 1; i < nic->num_entries; i++) {
3827 nic->entries[i].entry = ((i - 1) * 8) + 1;
3828 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
cc6e7c44
RA
3829 nic->s2io_entries[i].arg = NULL;
3830 nic->s2io_entries[i].in_use = 0;
3831 }
3832
8a4bdbaa 3833 rx_mat = readq(&bar0->rx_mat);
f61e0a35 3834 for (j = 0; j < nic->config.rx_ring_num; j++) {
8a4bdbaa 3835 rx_mat |= RX_MAT_SET(j, msix_indx);
f61e0a35
SH
3836 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3837 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3838 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3839 msix_indx += 8;
cc6e7c44 3840 }
8a4bdbaa 3841 writeq(rx_mat, &bar0->rx_mat);
f61e0a35 3842 readq(&bar0->rx_mat);
cc6e7c44 3843
f61e0a35 3844 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
c92ca04b 3845 /* We fail init if error or we get less vectors than min required */
cc6e7c44 3846 if (ret) {
9e39f7c5 3847 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
cc6e7c44 3848 kfree(nic->entries);
ffb5df6c
JP
3849 swstats->mem_freed += nic->num_entries *
3850 sizeof(struct msix_entry);
cc6e7c44 3851 kfree(nic->s2io_entries);
ffb5df6c
JP
3852 swstats->mem_freed += nic->num_entries *
3853 sizeof(struct s2io_msix_entry);
cc6e7c44
RA
3854 nic->entries = NULL;
3855 nic->s2io_entries = NULL;
3856 return -ENOMEM;
3857 }
3858
3859 /*
3860 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3861 * in the herc NIC. (Temp change, needs to be removed later)
3862 */
3863 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3864 msi_control |= 0x1; /* Enable MSI */
3865 pci_write_config_word(nic->pdev, 0x42, msi_control);
3866
3867 return 0;
3868}
3869
8abc4d5b 3870/* Handle software interrupt used during MSI(X) test */
33390a70 3871static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3872{
3873 struct s2io_nic *sp = dev_id;
3874
3875 sp->msi_detected = 1;
3876 wake_up(&sp->msi_wait);
3877
3878 return IRQ_HANDLED;
3879}
3880
3881/* Test interrupt path by forcing a a software IRQ */
33390a70 3882static int s2io_test_msi(struct s2io_nic *sp)
8abc4d5b
SS
3883{
3884 struct pci_dev *pdev = sp->pdev;
3885 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3886 int err;
3887 u64 val64, saved64;
3888
3889 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
d44570e4 3890 sp->name, sp);
8abc4d5b
SS
3891 if (err) {
3892 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
d44570e4 3893 sp->dev->name, pci_name(pdev), pdev->irq);
8abc4d5b
SS
3894 return err;
3895 }
3896
d44570e4 3897 init_waitqueue_head(&sp->msi_wait);
8abc4d5b
SS
3898 sp->msi_detected = 0;
3899
3900 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3901 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3902 val64 |= SCHED_INT_CTRL_TIMER_EN;
3903 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3904 writeq(val64, &bar0->scheduled_int_ctrl);
3905
3906 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3907
3908 if (!sp->msi_detected) {
3909 /* MSI(X) test failed, go back to INTx mode */
2450022a 3910 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
9e39f7c5
JP
3911 "using MSI(X) during test\n",
3912 sp->dev->name, pci_name(pdev));
8abc4d5b
SS
3913
3914 err = -EOPNOTSUPP;
3915 }
3916
3917 free_irq(sp->entries[1].vector, sp);
3918
3919 writeq(saved64, &bar0->scheduled_int_ctrl);
3920
3921 return err;
3922}
18b2b7bd
SH
3923
3924static void remove_msix_isr(struct s2io_nic *sp)
3925{
3926 int i;
3927 u16 msi_control;
3928
f61e0a35 3929 for (i = 0; i < sp->num_entries; i++) {
d44570e4 3930 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
18b2b7bd
SH
3931 int vector = sp->entries[i].vector;
3932 void *arg = sp->s2io_entries[i].arg;
3933 free_irq(vector, arg);
3934 }
3935 }
3936
3937 kfree(sp->entries);
3938 kfree(sp->s2io_entries);
3939 sp->entries = NULL;
3940 sp->s2io_entries = NULL;
3941
3942 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3943 msi_control &= 0xFFFE; /* Disable MSI */
3944 pci_write_config_word(sp->pdev, 0x42, msi_control);
3945
3946 pci_disable_msix(sp->pdev);
3947}
3948
3949static void remove_inta_isr(struct s2io_nic *sp)
3950{
3951 struct net_device *dev = sp->dev;
3952
3953 free_irq(sp->pdev->irq, dev);
3954}
3955
1da177e4
LT
3956/* ********************************************************* *
3957 * Functions defined below concern the OS part of the driver *
3958 * ********************************************************* */
3959
20346722 3960/**
1da177e4
LT
3961 * s2io_open - open entry point of the driver
3962 * @dev : pointer to the device structure.
3963 * Description:
3964 * This function is the open entry point of the driver. It mainly calls a
3965 * function to allocate Rx buffers and inserts them into the buffer
20346722 3966 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3967 * Return value:
3968 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3969 * file on failure.
3970 */
3971
ac1f60db 3972static int s2io_open(struct net_device *dev)
1da177e4 3973{
4cf1653a 3974 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 3975 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
3976 int err = 0;
3977
20346722
K
3978 /*
3979 * Make sure you have link off by default every time
1da177e4
LT
3980 * Nic is initialized
3981 */
3982 netif_carrier_off(dev);
0b1f7ebe 3983 sp->last_link_state = 0;
1da177e4
LT
3984
3985 /* Initialize H/W and enable interrupts */
c92ca04b
AR
3986 err = s2io_card_up(sp);
3987 if (err) {
1da177e4
LT
3988 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3989 dev->name);
e6a8fee2 3990 goto hw_init_failed;
1da177e4
LT
3991 }
3992
2fd37688 3993 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
1da177e4 3994 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
e6a8fee2 3995 s2io_card_down(sp);
20346722 3996 err = -ENODEV;
e6a8fee2 3997 goto hw_init_failed;
1da177e4 3998 }
3a3d5756 3999 s2io_start_all_tx_queue(sp);
1da177e4 4000 return 0;
20346722 4001
20346722 4002hw_init_failed:
eaae7f72 4003 if (sp->config.intr_type == MSI_X) {
491976b2 4004 if (sp->entries) {
cc6e7c44 4005 kfree(sp->entries);
ffb5df6c
JP
4006 swstats->mem_freed += sp->num_entries *
4007 sizeof(struct msix_entry);
491976b2
SH
4008 }
4009 if (sp->s2io_entries) {
cc6e7c44 4010 kfree(sp->s2io_entries);
ffb5df6c
JP
4011 swstats->mem_freed += sp->num_entries *
4012 sizeof(struct s2io_msix_entry);
491976b2 4013 }
cc6e7c44 4014 }
20346722 4015 return err;
1da177e4
LT
4016}
4017
4018/**
4019 * s2io_close -close entry point of the driver
4020 * @dev : device pointer.
4021 * Description:
4022 * This is the stop entry point of the driver. It needs to undo exactly
4023 * whatever was done by the open entry point,thus it's usually referred to
4024 * as the close function.Among other things this function mainly stops the
4025 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4026 * Return value:
4027 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4028 * file on failure.
4029 */
4030
ac1f60db 4031static int s2io_close(struct net_device *dev)
1da177e4 4032{
4cf1653a 4033 struct s2io_nic *sp = netdev_priv(dev);
faa4f796
SH
4034 struct config_param *config = &sp->config;
4035 u64 tmp64;
4036 int offset;
cc6e7c44 4037
9f74ffde 4038 /* Return if the device is already closed *
d44570e4
JP
4039 * Can happen when s2io_card_up failed in change_mtu *
4040 */
9f74ffde
SH
4041 if (!is_s2io_card_up(sp))
4042 return 0;
4043
3a3d5756 4044 s2io_stop_all_tx_queue(sp);
faa4f796
SH
4045 /* delete all populated mac entries */
4046 for (offset = 1; offset < config->max_mc_addr; offset++) {
4047 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4048 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4049 do_s2io_delete_unicast_mc(sp, tmp64);
4050 }
4051
e6a8fee2 4052 s2io_card_down(sp);
cc6e7c44 4053
1da177e4
LT
4054 return 0;
4055}
4056
4057/**
4058 * s2io_xmit - Tx entry point of te driver
4059 * @skb : the socket buffer containing the Tx data.
4060 * @dev : device pointer.
4061 * Description :
4062 * This function is the Tx entry point of the driver. S2IO NIC supports
4063 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4064 * NOTE: when device cant queue the pkt,just the trans_start variable will
4065 * not be upadted.
4066 * Return value:
4067 * 0 on success & 1 on failure.
4068 */
4069
61357325 4070static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 4071{
4cf1653a 4072 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
4073 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4074 register u64 val64;
1ee6dd77
RB
4075 struct TxD *txdp;
4076 struct TxFIFO_element __iomem *tx_fifo;
2fda096d 4077 unsigned long flags = 0;
be3a6b02 4078 u16 vlan_tag = 0;
2fda096d 4079 struct fifo_info *fifo = NULL;
6cfc482b 4080 int do_spin_lock = 1;
75c30b13 4081 int offload_type;
6cfc482b 4082 int enable_per_list_interrupt = 0;
ffb5df6c
JP
4083 struct config_param *config = &sp->config;
4084 struct mac_info *mac_control = &sp->mac_control;
4085 struct stat_block *stats = mac_control->stats_info;
4086 struct swStat *swstats = &stats->sw_stat;
1da177e4 4087
20346722 4088 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
491976b2
SH
4089
4090 if (unlikely(skb->len <= 0)) {
9e39f7c5 4091 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
491976b2 4092 dev_kfree_skb_any(skb);
6ed10654 4093 return NETDEV_TX_OK;
2fda096d 4094 }
491976b2 4095
92b84437 4096 if (!is_s2io_card_up(sp)) {
20346722 4097 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4 4098 dev->name);
20346722 4099 dev_kfree_skb(skb);
6ed10654 4100 return NETDEV_TX_OK;
1da177e4
LT
4101 }
4102
4103 queue = 0;
eab6d18d 4104 if (vlan_tx_tag_present(skb))
be3a6b02 4105 vlan_tag = vlan_tx_tag_get(skb);
6cfc482b
SH
4106 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4107 if (skb->protocol == htons(ETH_P_IP)) {
4108 struct iphdr *ip;
4109 struct tcphdr *th;
4110 ip = ip_hdr(skb);
4111
4112 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4113 th = (struct tcphdr *)(((unsigned char *)ip) +
d44570e4 4114 ip->ihl*4);
6cfc482b
SH
4115
4116 if (ip->protocol == IPPROTO_TCP) {
4117 queue_len = sp->total_tcp_fifos;
4118 queue = (ntohs(th->source) +
d44570e4
JP
4119 ntohs(th->dest)) &
4120 sp->fifo_selector[queue_len - 1];
6cfc482b
SH
4121 if (queue >= queue_len)
4122 queue = queue_len - 1;
4123 } else if (ip->protocol == IPPROTO_UDP) {
4124 queue_len = sp->total_udp_fifos;
4125 queue = (ntohs(th->source) +
d44570e4
JP
4126 ntohs(th->dest)) &
4127 sp->fifo_selector[queue_len - 1];
6cfc482b
SH
4128 if (queue >= queue_len)
4129 queue = queue_len - 1;
4130 queue += sp->udp_fifo_idx;
4131 if (skb->len > 1024)
4132 enable_per_list_interrupt = 1;
4133 do_spin_lock = 0;
4134 }
4135 }
4136 }
4137 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4138 /* get fifo number based on skb->priority value */
4139 queue = config->fifo_mapping
d44570e4 4140 [skb->priority & (MAX_TX_FIFOS - 1)];
6cfc482b 4141 fifo = &mac_control->fifos[queue];
3a3d5756 4142
6cfc482b
SH
4143 if (do_spin_lock)
4144 spin_lock_irqsave(&fifo->tx_lock, flags);
4145 else {
4146 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4147 return NETDEV_TX_LOCKED;
4148 }
be3a6b02 4149
3a3d5756
SH
4150 if (sp->config.multiq) {
4151 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4152 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4153 return NETDEV_TX_BUSY;
4154 }
b19fa1fa 4155 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
4156 if (netif_queue_stopped(dev)) {
4157 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4158 return NETDEV_TX_BUSY;
4159 }
4160 }
4161
d44570e4
JP
4162 put_off = (u16)fifo->tx_curr_put_info.offset;
4163 get_off = (u16)fifo->tx_curr_get_info.offset;
4164 txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;
20346722 4165
2fda096d 4166 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
1da177e4 4167 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 4168 if (txdp->Host_Control ||
d44570e4 4169 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
776bd20f 4170 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3a3d5756 4171 s2io_stop_tx_queue(sp, fifo->fifo_no);
1da177e4 4172 dev_kfree_skb(skb);
2fda096d 4173 spin_unlock_irqrestore(&fifo->tx_lock, flags);
6ed10654 4174 return NETDEV_TX_OK;
1da177e4 4175 }
0b1f7ebe 4176
75c30b13 4177 offload_type = s2io_offload_type(skb);
75c30b13 4178 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1da177e4 4179 txdp->Control_1 |= TXD_TCP_LSO_EN;
75c30b13 4180 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
1da177e4 4181 }
84fa7933 4182 if (skb->ip_summed == CHECKSUM_PARTIAL) {
d44570e4
JP
4183 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4184 TXD_TX_CKO_TCP_EN |
4185 TXD_TX_CKO_UDP_EN);
1da177e4 4186 }
fed5eccd
AR
4187 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4188 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2fda096d 4189 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
6cfc482b
SH
4190 if (enable_per_list_interrupt)
4191 if (put_off & (queue_len >> 5))
4192 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
3a3d5756 4193 if (vlan_tag) {
be3a6b02
K
4194 txdp->Control_2 |= TXD_VLAN_ENABLE;
4195 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4196 }
4197
e743d313 4198 frg_len = skb_headlen(skb);
75c30b13 4199 if (offload_type == SKB_GSO_UDP) {
fed5eccd
AR
4200 int ufo_size;
4201
75c30b13 4202 ufo_size = s2io_udp_mss(skb);
fed5eccd
AR
4203 ufo_size &= ~7;
4204 txdp->Control_1 |= TXD_UFO_EN;
4205 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4206 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4207#ifdef __BIG_ENDIAN
3459feb8 4208 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
2fda096d 4209 fifo->ufo_in_band_v[put_off] =
d44570e4 4210 (__force u64)skb_shinfo(skb)->ip6_frag_id;
fed5eccd 4211#else
2fda096d 4212 fifo->ufo_in_band_v[put_off] =
d44570e4 4213 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
fed5eccd 4214#endif
2fda096d 4215 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
fed5eccd 4216 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
d44570e4
JP
4217 fifo->ufo_in_band_v,
4218 sizeof(u64),
4219 PCI_DMA_TODEVICE);
8d8bb39b 4220 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
491abf25 4221 goto pci_map_failed;
fed5eccd 4222 txdp++;
fed5eccd 4223 }
1da177e4 4224
d44570e4
JP
4225 txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4226 frg_len, PCI_DMA_TODEVICE);
8d8bb39b 4227 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
491abf25
VP
4228 goto pci_map_failed;
4229
d44570e4 4230 txdp->Host_Control = (unsigned long)skb;
fed5eccd 4231 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
75c30b13 4232 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
4233 txdp->Control_1 |= TXD_UFO_EN;
4234
4235 frg_cnt = skb_shinfo(skb)->nr_frags;
1da177e4
LT
4236 /* For fragmented SKB. */
4237 for (i = 0; i < frg_cnt; i++) {
4238 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe
K
4239 /* A '0' length fragment will be ignored */
4240 if (!frag->size)
4241 continue;
1da177e4 4242 txdp++;
d44570e4
JP
4243 txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
4244 frag->page_offset,
4245 frag->size,
4246 PCI_DMA_TODEVICE);
efd51b5c 4247 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
75c30b13 4248 if (offload_type == SKB_GSO_UDP)
fed5eccd 4249 txdp->Control_1 |= TXD_UFO_EN;
1da177e4
LT
4250 }
4251 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4252
75c30b13 4253 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
4254 frg_cnt++; /* as Txd0 was used for inband header */
4255
1da177e4 4256 tx_fifo = mac_control->tx_FIFO_start[queue];
2fda096d 4257 val64 = fifo->list_info[put_off].list_phy_addr;
1da177e4
LT
4258 writeq(val64, &tx_fifo->TxDL_Pointer);
4259
4260 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4261 TX_FIFO_LAST_LIST);
75c30b13 4262 if (offload_type)
fed5eccd 4263 val64 |= TX_FIFO_SPECIAL_FUNC;
75c30b13 4264
1da177e4
LT
4265 writeq(val64, &tx_fifo->List_Control);
4266
303bcb4b
K
4267 mmiowb();
4268
1da177e4 4269 put_off++;
2fda096d 4270 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
863c11a9 4271 put_off = 0;
2fda096d 4272 fifo->tx_curr_put_info.offset = put_off;
1da177e4
LT
4273
4274 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 4275 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
ffb5df6c 4276 swstats->fifo_full_cnt++;
1da177e4
LT
4277 DBG_PRINT(TX_DBG,
4278 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4279 put_off, get_off);
3a3d5756 4280 s2io_stop_tx_queue(sp, fifo->fifo_no);
1da177e4 4281 }
ffb5df6c 4282 swstats->mem_allocated += skb->truesize;
2fda096d 4283 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1da177e4 4284
f6f4bfa3
SH
4285 if (sp->config.intr_type == MSI_X)
4286 tx_intr_handler(fifo);
4287
6ed10654 4288 return NETDEV_TX_OK;
ffb5df6c 4289
491abf25 4290pci_map_failed:
ffb5df6c 4291 swstats->pci_map_fail_cnt++;
3a3d5756 4292 s2io_stop_tx_queue(sp, fifo->fifo_no);
ffb5df6c 4293 swstats->mem_freed += skb->truesize;
491abf25 4294 dev_kfree_skb(skb);
2fda096d 4295 spin_unlock_irqrestore(&fifo->tx_lock, flags);
6ed10654 4296 return NETDEV_TX_OK;
1da177e4
LT
4297}
4298
25fff88e
K
4299static void
4300s2io_alarm_handle(unsigned long data)
4301{
1ee6dd77 4302 struct s2io_nic *sp = (struct s2io_nic *)data;
8116f3cf 4303 struct net_device *dev = sp->dev;
25fff88e 4304
8116f3cf 4305 s2io_handle_errors(dev);
25fff88e
K
4306 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4307}
4308
7d12e780 4309static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
cc6e7c44 4310{
1ee6dd77
RB
4311 struct ring_info *ring = (struct ring_info *)dev_id;
4312 struct s2io_nic *sp = ring->nic;
f61e0a35 4313 struct XENA_dev_config __iomem *bar0 = sp->bar0;
cc6e7c44 4314
f61e0a35 4315 if (unlikely(!is_s2io_card_up(sp)))
92b84437 4316 return IRQ_HANDLED;
92b84437 4317
f61e0a35 4318 if (sp->config.napi) {
1a79d1c3
AV
4319 u8 __iomem *addr = NULL;
4320 u8 val8 = 0;
f61e0a35 4321
1a79d1c3 4322 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
f61e0a35
SH
4323 addr += (7 - ring->ring_no);
4324 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4325 writeb(val8, addr);
4326 val8 = readb(addr);
288379f0 4327 napi_schedule(&ring->napi);
f61e0a35
SH
4328 } else {
4329 rx_intr_handler(ring, 0);
8d8bb39b 4330 s2io_chk_rx_buffers(sp, ring);
f61e0a35 4331 }
7d3d0439 4332
cc6e7c44
RA
4333 return IRQ_HANDLED;
4334}
4335
7d12e780 4336static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
cc6e7c44 4337{
ac731ab6
SH
4338 int i;
4339 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4340 struct s2io_nic *sp = fifos->nic;
4341 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4342 struct config_param *config = &sp->config;
4343 u64 reason;
cc6e7c44 4344
ac731ab6
SH
4345 if (unlikely(!is_s2io_card_up(sp)))
4346 return IRQ_NONE;
4347
4348 reason = readq(&bar0->general_int_status);
4349 if (unlikely(reason == S2IO_MINUS_ONE))
4350 /* Nothing much can be done. Get out */
92b84437 4351 return IRQ_HANDLED;
92b84437 4352
01e16faa
SH
4353 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4354 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
ac731ab6 4355
01e16faa
SH
4356 if (reason & GEN_INTR_TXPIC)
4357 s2io_txpic_intr_handle(sp);
ac731ab6 4358
01e16faa
SH
4359 if (reason & GEN_INTR_TXTRAFFIC)
4360 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
ac731ab6 4361
01e16faa
SH
4362 for (i = 0; i < config->tx_fifo_num; i++)
4363 tx_intr_handler(&fifos[i]);
ac731ab6 4364
01e16faa
SH
4365 writeq(sp->general_int_mask, &bar0->general_int_mask);
4366 readl(&bar0->general_int_status);
4367 return IRQ_HANDLED;
4368 }
4369 /* The interrupt was not raised by us */
4370 return IRQ_NONE;
cc6e7c44 4371}
ac731ab6 4372
1ee6dd77 4373static void s2io_txpic_intr_handle(struct s2io_nic *sp)
a371a07d 4374{
1ee6dd77 4375 struct XENA_dev_config __iomem *bar0 = sp->bar0;
a371a07d
K
4376 u64 val64;
4377
4378 val64 = readq(&bar0->pic_int_status);
4379 if (val64 & PIC_INT_GPIO) {
4380 val64 = readq(&bar0->gpio_int_reg);
4381 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4382 (val64 & GPIO_INT_REG_LINK_UP)) {
c92ca04b
AR
4383 /*
4384 * This is unstable state so clear both up/down
4385 * interrupt and adapter to re-evaluate the link state.
4386 */
d44570e4 4387 val64 |= GPIO_INT_REG_LINK_DOWN;
a371a07d
K
4388 val64 |= GPIO_INT_REG_LINK_UP;
4389 writeq(val64, &bar0->gpio_int_reg);
a371a07d 4390 val64 = readq(&bar0->gpio_int_mask);
c92ca04b
AR
4391 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4392 GPIO_INT_MASK_LINK_DOWN);
a371a07d 4393 writeq(val64, &bar0->gpio_int_mask);
d44570e4 4394 } else if (val64 & GPIO_INT_REG_LINK_UP) {
c92ca04b 4395 val64 = readq(&bar0->adapter_status);
d44570e4 4396 /* Enable Adapter */
19a60522
SS
4397 val64 = readq(&bar0->adapter_control);
4398 val64 |= ADAPTER_CNTL_EN;
4399 writeq(val64, &bar0->adapter_control);
4400 val64 |= ADAPTER_LED_ON;
4401 writeq(val64, &bar0->adapter_control);
4402 if (!sp->device_enabled_once)
4403 sp->device_enabled_once = 1;
c92ca04b 4404
19a60522
SS
4405 s2io_link(sp, LINK_UP);
4406 /*
4407 * unmask link down interrupt and mask link-up
4408 * intr
4409 */
4410 val64 = readq(&bar0->gpio_int_mask);
4411 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4412 val64 |= GPIO_INT_MASK_LINK_UP;
4413 writeq(val64, &bar0->gpio_int_mask);
c92ca04b 4414
d44570e4 4415 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
c92ca04b 4416 val64 = readq(&bar0->adapter_status);
19a60522
SS
4417 s2io_link(sp, LINK_DOWN);
4418 /* Link is down so unmaks link up interrupt */
4419 val64 = readq(&bar0->gpio_int_mask);
4420 val64 &= ~GPIO_INT_MASK_LINK_UP;
4421 val64 |= GPIO_INT_MASK_LINK_DOWN;
4422 writeq(val64, &bar0->gpio_int_mask);
ac1f90d6
SS
4423
4424 /* turn off LED */
4425 val64 = readq(&bar0->adapter_control);
d44570e4 4426 val64 = val64 & (~ADAPTER_LED_ON);
ac1f90d6 4427 writeq(val64, &bar0->adapter_control);
a371a07d
K
4428 }
4429 }
c92ca04b 4430 val64 = readq(&bar0->gpio_int_mask);
a371a07d
K
4431}
4432
8116f3cf
SS
4433/**
4434 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4435 * @value: alarm bits
4436 * @addr: address value
4437 * @cnt: counter variable
4438 * Description: Check for alarm and increment the counter
4439 * Return Value:
4440 * 1 - if alarm bit set
4441 * 0 - if alarm bit is not set
4442 */
d44570e4
JP
4443static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4444 unsigned long long *cnt)
8116f3cf
SS
4445{
4446 u64 val64;
4447 val64 = readq(addr);
d44570e4 4448 if (val64 & value) {
8116f3cf
SS
4449 writeq(val64, addr);
4450 (*cnt)++;
4451 return 1;
4452 }
4453 return 0;
4454
4455}
4456
4457/**
4458 * s2io_handle_errors - Xframe error indication handler
4459 * @nic: device private variable
4460 * Description: Handle alarms such as loss of link, single or
4461 * double ECC errors, critical and serious errors.
4462 * Return Value:
4463 * NONE
4464 */
d44570e4 4465static void s2io_handle_errors(void *dev_id)
8116f3cf 4466{
d44570e4 4467 struct net_device *dev = (struct net_device *)dev_id;
4cf1653a 4468 struct s2io_nic *sp = netdev_priv(dev);
8116f3cf 4469 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d44570e4 4470 u64 temp64 = 0, val64 = 0;
8116f3cf
SS
4471 int i = 0;
4472
4473 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4474 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4475
92b84437 4476 if (!is_s2io_card_up(sp))
8116f3cf
SS
4477 return;
4478
4479 if (pci_channel_offline(sp->pdev))
4480 return;
4481
4482 memset(&sw_stat->ring_full_cnt, 0,
d44570e4 4483 sizeof(sw_stat->ring_full_cnt));
8116f3cf
SS
4484
4485 /* Handling the XPAK counters update */
d44570e4 4486 if (stats->xpak_timer_count < 72000) {
8116f3cf
SS
4487 /* waiting for an hour */
4488 stats->xpak_timer_count++;
4489 } else {
4490 s2io_updt_xpak_counter(dev);
4491 /* reset the count to zero */
4492 stats->xpak_timer_count = 0;
4493 }
4494
4495 /* Handling link status change error Intr */
4496 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4497 val64 = readq(&bar0->mac_rmac_err_reg);
4498 writeq(val64, &bar0->mac_rmac_err_reg);
4499 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4500 schedule_work(&sp->set_link_task);
4501 }
4502
4503 /* In case of a serious error, the device will be Reset. */
4504 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
d44570e4 4505 &sw_stat->serious_err_cnt))
8116f3cf
SS
4506 goto reset;
4507
4508 /* Check for data parity error */
4509 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
d44570e4 4510 &sw_stat->parity_err_cnt))
8116f3cf
SS
4511 goto reset;
4512
4513 /* Check for ring full counter */
4514 if (sp->device_type == XFRAME_II_DEVICE) {
4515 val64 = readq(&bar0->ring_bump_counter1);
d44570e4
JP
4516 for (i = 0; i < 4; i++) {
4517 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf
SS
4518 temp64 >>= 64 - ((i+1)*16);
4519 sw_stat->ring_full_cnt[i] += temp64;
4520 }
4521
4522 val64 = readq(&bar0->ring_bump_counter2);
d44570e4
JP
4523 for (i = 0; i < 4; i++) {
4524 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
8116f3cf 4525 temp64 >>= 64 - ((i+1)*16);
d44570e4 4526 sw_stat->ring_full_cnt[i+4] += temp64;
8116f3cf
SS
4527 }
4528 }
4529
4530 val64 = readq(&bar0->txdma_int_status);
4531 /*check for pfc_err*/
4532 if (val64 & TXDMA_PFC_INT) {
d44570e4
JP
4533 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4534 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4535 PFC_PCIX_ERR,
4536 &bar0->pfc_err_reg,
4537 &sw_stat->pfc_err_cnt))
8116f3cf 4538 goto reset;
d44570e4
JP
4539 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4540 &bar0->pfc_err_reg,
4541 &sw_stat->pfc_err_cnt);
8116f3cf
SS
4542 }
4543
4544 /*check for tda_err*/
4545 if (val64 & TXDMA_TDA_INT) {
d44570e4
JP
4546 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4547 TDA_SM0_ERR_ALARM |
4548 TDA_SM1_ERR_ALARM,
4549 &bar0->tda_err_reg,
4550 &sw_stat->tda_err_cnt))
8116f3cf
SS
4551 goto reset;
4552 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
d44570e4
JP
4553 &bar0->tda_err_reg,
4554 &sw_stat->tda_err_cnt);
8116f3cf
SS
4555 }
4556 /*check for pcc_err*/
4557 if (val64 & TXDMA_PCC_INT) {
d44570e4
JP
4558 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4559 PCC_N_SERR | PCC_6_COF_OV_ERR |
4560 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4561 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4562 PCC_TXB_ECC_DB_ERR,
4563 &bar0->pcc_err_reg,
4564 &sw_stat->pcc_err_cnt))
8116f3cf
SS
4565 goto reset;
4566 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
d44570e4
JP
4567 &bar0->pcc_err_reg,
4568 &sw_stat->pcc_err_cnt);
8116f3cf
SS
4569 }
4570
4571 /*check for tti_err*/
4572 if (val64 & TXDMA_TTI_INT) {
d44570e4
JP
4573 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4574 &bar0->tti_err_reg,
4575 &sw_stat->tti_err_cnt))
8116f3cf
SS
4576 goto reset;
4577 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
d44570e4
JP
4578 &bar0->tti_err_reg,
4579 &sw_stat->tti_err_cnt);
8116f3cf
SS
4580 }
4581
4582 /*check for lso_err*/
4583 if (val64 & TXDMA_LSO_INT) {
d44570e4
JP
4584 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4585 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4586 &bar0->lso_err_reg,
4587 &sw_stat->lso_err_cnt))
8116f3cf
SS
4588 goto reset;
4589 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
d44570e4
JP
4590 &bar0->lso_err_reg,
4591 &sw_stat->lso_err_cnt);
8116f3cf
SS
4592 }
4593
4594 /*check for tpa_err*/
4595 if (val64 & TXDMA_TPA_INT) {
d44570e4
JP
4596 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4597 &bar0->tpa_err_reg,
4598 &sw_stat->tpa_err_cnt))
8116f3cf 4599 goto reset;
d44570e4
JP
4600 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4601 &bar0->tpa_err_reg,
4602 &sw_stat->tpa_err_cnt);
8116f3cf
SS
4603 }
4604
4605 /*check for sm_err*/
4606 if (val64 & TXDMA_SM_INT) {
d44570e4
JP
4607 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4608 &bar0->sm_err_reg,
4609 &sw_stat->sm_err_cnt))
8116f3cf
SS
4610 goto reset;
4611 }
4612
4613 val64 = readq(&bar0->mac_int_status);
4614 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4615 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
d44570e4
JP
4616 &bar0->mac_tmac_err_reg,
4617 &sw_stat->mac_tmac_err_cnt))
8116f3cf 4618 goto reset;
d44570e4
JP
4619 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4620 TMAC_DESC_ECC_SG_ERR |
4621 TMAC_DESC_ECC_DB_ERR,
4622 &bar0->mac_tmac_err_reg,
4623 &sw_stat->mac_tmac_err_cnt);
8116f3cf
SS
4624 }
4625
4626 val64 = readq(&bar0->xgxs_int_status);
4627 if (val64 & XGXS_INT_STATUS_TXGXS) {
4628 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
d44570e4
JP
4629 &bar0->xgxs_txgxs_err_reg,
4630 &sw_stat->xgxs_txgxs_err_cnt))
8116f3cf
SS
4631 goto reset;
4632 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
d44570e4
JP
4633 &bar0->xgxs_txgxs_err_reg,
4634 &sw_stat->xgxs_txgxs_err_cnt);
8116f3cf
SS
4635 }
4636
4637 val64 = readq(&bar0->rxdma_int_status);
4638 if (val64 & RXDMA_INT_RC_INT_M) {
d44570e4
JP
4639 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4640 RC_FTC_ECC_DB_ERR |
4641 RC_PRCn_SM_ERR_ALARM |
4642 RC_FTC_SM_ERR_ALARM,
4643 &bar0->rc_err_reg,
4644 &sw_stat->rc_err_cnt))
8116f3cf 4645 goto reset;
d44570e4
JP
4646 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4647 RC_FTC_ECC_SG_ERR |
4648 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4649 &sw_stat->rc_err_cnt);
4650 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4651 PRC_PCI_AB_WR_Rn |
4652 PRC_PCI_AB_F_WR_Rn,
4653 &bar0->prc_pcix_err_reg,
4654 &sw_stat->prc_pcix_err_cnt))
8116f3cf 4655 goto reset;
d44570e4
JP
4656 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4657 PRC_PCI_DP_WR_Rn |
4658 PRC_PCI_DP_F_WR_Rn,
4659 &bar0->prc_pcix_err_reg,
4660 &sw_stat->prc_pcix_err_cnt);
8116f3cf
SS
4661 }
4662
4663 if (val64 & RXDMA_INT_RPA_INT_M) {
4664 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
d44570e4
JP
4665 &bar0->rpa_err_reg,
4666 &sw_stat->rpa_err_cnt))
8116f3cf
SS
4667 goto reset;
4668 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
d44570e4
JP
4669 &bar0->rpa_err_reg,
4670 &sw_stat->rpa_err_cnt);
8116f3cf
SS
4671 }
4672
4673 if (val64 & RXDMA_INT_RDA_INT_M) {
d44570e4
JP
4674 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4675 RDA_FRM_ECC_DB_N_AERR |
4676 RDA_SM1_ERR_ALARM |
4677 RDA_SM0_ERR_ALARM |
4678 RDA_RXD_ECC_DB_SERR,
4679 &bar0->rda_err_reg,
4680 &sw_stat->rda_err_cnt))
8116f3cf 4681 goto reset;
d44570e4
JP
4682 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4683 RDA_FRM_ECC_SG_ERR |
4684 RDA_MISC_ERR |
4685 RDA_PCIX_ERR,
4686 &bar0->rda_err_reg,
4687 &sw_stat->rda_err_cnt);
8116f3cf
SS
4688 }
4689
4690 if (val64 & RXDMA_INT_RTI_INT_M) {
d44570e4
JP
4691 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4692 &bar0->rti_err_reg,
4693 &sw_stat->rti_err_cnt))
8116f3cf
SS
4694 goto reset;
4695 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
d44570e4
JP
4696 &bar0->rti_err_reg,
4697 &sw_stat->rti_err_cnt);
8116f3cf
SS
4698 }
4699
4700 val64 = readq(&bar0->mac_int_status);
4701 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4702 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
d44570e4
JP
4703 &bar0->mac_rmac_err_reg,
4704 &sw_stat->mac_rmac_err_cnt))
8116f3cf 4705 goto reset;
d44570e4
JP
4706 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4707 RMAC_SINGLE_ECC_ERR |
4708 RMAC_DOUBLE_ECC_ERR,
4709 &bar0->mac_rmac_err_reg,
4710 &sw_stat->mac_rmac_err_cnt);
8116f3cf
SS
4711 }
4712
4713 val64 = readq(&bar0->xgxs_int_status);
4714 if (val64 & XGXS_INT_STATUS_RXGXS) {
4715 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
d44570e4
JP
4716 &bar0->xgxs_rxgxs_err_reg,
4717 &sw_stat->xgxs_rxgxs_err_cnt))
8116f3cf
SS
4718 goto reset;
4719 }
4720
4721 val64 = readq(&bar0->mc_int_status);
d44570e4
JP
4722 if (val64 & MC_INT_STATUS_MC_INT) {
4723 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4724 &bar0->mc_err_reg,
4725 &sw_stat->mc_err_cnt))
8116f3cf
SS
4726 goto reset;
4727
4728 /* Handling Ecc errors */
4729 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4730 writeq(val64, &bar0->mc_err_reg);
4731 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4732 sw_stat->double_ecc_errs++;
4733 if (sp->device_type != XFRAME_II_DEVICE) {
4734 /*
4735 * Reset XframeI only if critical error
4736 */
4737 if (val64 &
d44570e4
JP
4738 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4739 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4740 goto reset;
4741 }
8116f3cf
SS
4742 } else
4743 sw_stat->single_ecc_errs++;
4744 }
4745 }
4746 return;
4747
4748reset:
3a3d5756 4749 s2io_stop_all_tx_queue(sp);
8116f3cf
SS
4750 schedule_work(&sp->rst_timer_task);
4751 sw_stat->soft_reset_cnt++;
8116f3cf
SS
4752}
4753
1da177e4
LT
4754/**
4755 * s2io_isr - ISR handler of the device .
4756 * @irq: the irq of the device.
4757 * @dev_id: a void pointer to the dev structure of the NIC.
20346722
K
4758 * Description: This function is the ISR handler of the device. It
4759 * identifies the reason for the interrupt and calls the relevant
4760 * service routines. As a contongency measure, this ISR allocates the
1da177e4
LT
4761 * recv buffers, if their numbers are below the panic value which is
4762 * presently set to 25% of the original number of rcv buffers allocated.
4763 * Return value:
20346722 4764 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
4765 * IRQ_NONE: will be returned if interrupt is not from our device
4766 */
7d12e780 4767static irqreturn_t s2io_isr(int irq, void *dev_id)
1da177e4 4768{
d44570e4 4769 struct net_device *dev = (struct net_device *)dev_id;
4cf1653a 4770 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 4771 struct XENA_dev_config __iomem *bar0 = sp->bar0;
20346722 4772 int i;
19a60522 4773 u64 reason = 0;
1ee6dd77 4774 struct mac_info *mac_control;
1da177e4
LT
4775 struct config_param *config;
4776
d796fdb7
LV
4777 /* Pretend we handled any irq's from a disconnected card */
4778 if (pci_channel_offline(sp->pdev))
4779 return IRQ_NONE;
4780
596c5c97 4781 if (!is_s2io_card_up(sp))
92b84437 4782 return IRQ_NONE;
92b84437 4783
1da177e4 4784 config = &sp->config;
ffb5df6c 4785 mac_control = &sp->mac_control;
1da177e4 4786
20346722 4787 /*
1da177e4
LT
4788 * Identify the cause for interrupt and call the appropriate
4789 * interrupt handler. Causes for the interrupt could be;
4790 * 1. Rx of packet.
4791 * 2. Tx complete.
4792 * 3. Link down.
1da177e4
LT
4793 */
4794 reason = readq(&bar0->general_int_status);
4795
d44570e4
JP
4796 if (unlikely(reason == S2IO_MINUS_ONE))
4797 return IRQ_HANDLED; /* Nothing much can be done. Get out */
5d3213cc 4798
d44570e4
JP
4799 if (reason &
4800 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
596c5c97
SS
4801 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4802
4803 if (config->napi) {
4804 if (reason & GEN_INTR_RXTRAFFIC) {
288379f0 4805 napi_schedule(&sp->napi);
f61e0a35
SH
4806 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4807 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4808 readl(&bar0->rx_traffic_int);
db874e65 4809 }
596c5c97
SS
4810 } else {
4811 /*
4812 * rx_traffic_int reg is an R1 register, writing all 1's
4813 * will ensure that the actual interrupt causing bit
4814 * get's cleared and hence a read can be avoided.
4815 */
4816 if (reason & GEN_INTR_RXTRAFFIC)
19a60522 4817 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
596c5c97 4818
13d866a9
JP
4819 for (i = 0; i < config->rx_ring_num; i++) {
4820 struct ring_info *ring = &mac_control->rings[i];
4821
4822 rx_intr_handler(ring, 0);
4823 }
db874e65 4824 }
596c5c97 4825
db874e65 4826 /*
596c5c97 4827 * tx_traffic_int reg is an R1 register, writing all 1's
db874e65
SS
4828 * will ensure that the actual interrupt causing bit get's
4829 * cleared and hence a read can be avoided.
4830 */
596c5c97
SS
4831 if (reason & GEN_INTR_TXTRAFFIC)
4832 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
19a60522 4833
596c5c97
SS
4834 for (i = 0; i < config->tx_fifo_num; i++)
4835 tx_intr_handler(&mac_control->fifos[i]);
1da177e4 4836
596c5c97
SS
4837 if (reason & GEN_INTR_TXPIC)
4838 s2io_txpic_intr_handle(sp);
fe113638 4839
596c5c97
SS
4840 /*
4841 * Reallocate the buffers from the interrupt handler itself.
4842 */
4843 if (!config->napi) {
13d866a9
JP
4844 for (i = 0; i < config->rx_ring_num; i++) {
4845 struct ring_info *ring = &mac_control->rings[i];
4846
4847 s2io_chk_rx_buffers(sp, ring);
4848 }
596c5c97
SS
4849 }
4850 writeq(sp->general_int_mask, &bar0->general_int_mask);
4851 readl(&bar0->general_int_status);
20346722 4852
596c5c97 4853 return IRQ_HANDLED;
db874e65 4854
d44570e4 4855 } else if (!reason) {
596c5c97
SS
4856 /* The interrupt was not raised by us */
4857 return IRQ_NONE;
4858 }
db874e65 4859
1da177e4
LT
4860 return IRQ_HANDLED;
4861}
4862
7ba013ac
K
4863/**
4864 * s2io_updt_stats -
4865 */
1ee6dd77 4866static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4867{
1ee6dd77 4868 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4869 u64 val64;
4870 int cnt = 0;
4871
92b84437 4872 if (is_s2io_card_up(sp)) {
7ba013ac
K
4873 /* Apprx 30us on a 133 MHz bus */
4874 val64 = SET_UPDT_CLICKS(10) |
4875 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4876 writeq(val64, &bar0->stat_cfg);
4877 do {
4878 udelay(100);
4879 val64 = readq(&bar0->stat_cfg);
b7b5a128 4880 if (!(val64 & s2BIT(0)))
7ba013ac
K
4881 break;
4882 cnt++;
4883 if (cnt == 5)
4884 break; /* Updt failed */
d44570e4 4885 } while (1);
8a4bdbaa 4886 }
7ba013ac
K
4887}
4888
1da177e4 4889/**
20346722 4890 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4891 * @dev : pointer to the device structure.
4892 * Description:
20346722 4893 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4894 * structure and returns a pointer to the same.
4895 * Return value:
4896 * pointer to the updated net_device_stats structure.
4897 */
ac1f60db 4898static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4899{
4cf1653a 4900 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c
JP
4901 struct mac_info *mac_control = &sp->mac_control;
4902 struct stat_block *stats = mac_control->stats_info;
4a490432 4903 u64 delta;
1da177e4 4904
7ba013ac
K
4905 /* Configure Stats for immediate updt */
4906 s2io_updt_stats(sp);
4907
4a490432
JM
4908 /* A device reset will cause the on-adapter statistics to be zero'ed.
4909 * This can be done while running by changing the MTU. To prevent the
4910 * system from having the stats zero'ed, the driver keeps a copy of the
4911 * last update to the system (which is also zero'ed on reset). This
4912 * enables the driver to accurately know the delta between the last
4913 * update and the current update.
4914 */
4915 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4916 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4917 sp->stats.rx_packets += delta;
4918 dev->stats.rx_packets += delta;
4919
4920 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4921 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4922 sp->stats.tx_packets += delta;
4923 dev->stats.tx_packets += delta;
4924
4925 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4926 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4927 sp->stats.rx_bytes += delta;
4928 dev->stats.rx_bytes += delta;
4929
4930 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4931 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4932 sp->stats.tx_bytes += delta;
4933 dev->stats.tx_bytes += delta;
4934
4935 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4936 sp->stats.rx_errors += delta;
4937 dev->stats.rx_errors += delta;
4938
4939 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4940 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4941 sp->stats.tx_errors += delta;
4942 dev->stats.tx_errors += delta;
4943
4944 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4945 sp->stats.rx_dropped += delta;
4946 dev->stats.rx_dropped += delta;
4947
4948 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4949 sp->stats.tx_dropped += delta;
4950 dev->stats.tx_dropped += delta;
4951
4952 /* The adapter MAC interprets pause frames as multicast packets, but
4953 * does not pass them up. This erroneously increases the multicast
4954 * packet count and needs to be deducted when the multicast frame count
4955 * is queried.
4956 */
4957 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4958 le32_to_cpu(stats->rmac_vld_mcst_frms);
4959 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4960 delta -= sp->stats.multicast;
4961 sp->stats.multicast += delta;
4962 dev->stats.multicast += delta;
1da177e4 4963
4a490432
JM
4964 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4965 le32_to_cpu(stats->rmac_usized_frms)) +
4966 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4967 sp->stats.rx_length_errors += delta;
4968 dev->stats.rx_length_errors += delta;
13d866a9 4969
4a490432
JM
4970 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4971 sp->stats.rx_crc_errors += delta;
4972 dev->stats.rx_crc_errors += delta;
0425b46a 4973
d44570e4 4974 return &dev->stats;
1da177e4
LT
4975}
4976
4977/**
4978 * s2io_set_multicast - entry point for multicast address enable/disable.
4979 * @dev : pointer to the device structure
4980 * Description:
20346722
K
4981 * This function is a driver entry point which gets called by the kernel
4982 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
4983 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4984 * determine, if multicast address must be enabled or if promiscuous mode
4985 * is to be disabled etc.
4986 * Return value:
4987 * void.
4988 */
4989
4990static void s2io_set_multicast(struct net_device *dev)
4991{
4992 int i, j, prev_cnt;
22bedad3 4993 struct netdev_hw_addr *ha;
4cf1653a 4994 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 4995 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 4996 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
d44570e4 4997 0xfeffffffffffULL;
faa4f796 4998 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
1da177e4 4999 void __iomem *add;
faa4f796 5000 struct config_param *config = &sp->config;
1da177e4
LT
5001
5002 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
5003 /* Enable all Multicast addresses */
5004 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
5005 &bar0->rmac_addr_data0_mem);
5006 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
5007 &bar0->rmac_addr_data1_mem);
5008 val64 = RMAC_ADDR_CMD_MEM_WE |
d44570e4
JP
5009 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5010 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
1da177e4
LT
5011 writeq(val64, &bar0->rmac_addr_cmd_mem);
5012 /* Wait till command completes */
c92ca04b 5013 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5014 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5015 S2IO_BIT_RESET);
1da177e4
LT
5016
5017 sp->m_cast_flg = 1;
faa4f796 5018 sp->all_multi_pos = config->max_mc_addr - 1;
1da177e4
LT
5019 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
5020 /* Disable all Multicast addresses */
5021 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5022 &bar0->rmac_addr_data0_mem);
5e25b9dd
K
5023 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
5024 &bar0->rmac_addr_data1_mem);
1da177e4 5025 val64 = RMAC_ADDR_CMD_MEM_WE |
d44570e4
JP
5026 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5027 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
1da177e4
LT
5028 writeq(val64, &bar0->rmac_addr_cmd_mem);
5029 /* Wait till command completes */
c92ca04b 5030 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5031 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5032 S2IO_BIT_RESET);
1da177e4
LT
5033
5034 sp->m_cast_flg = 0;
5035 sp->all_multi_pos = 0;
5036 }
5037
5038 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5039 /* Put the NIC into promiscuous mode */
5040 add = &bar0->mac_cfg;
5041 val64 = readq(&bar0->mac_cfg);
5042 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5043
5044 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 5045 writel((u32)val64, add);
1da177e4
LT
5046 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5047 writel((u32) (val64 >> 32), (add + 4));
5048
926930b2
SS
5049 if (vlan_tag_strip != 1) {
5050 val64 = readq(&bar0->rx_pa_cfg);
5051 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5052 writeq(val64, &bar0->rx_pa_cfg);
cd0fce03 5053 sp->vlan_strip_flag = 0;
926930b2
SS
5054 }
5055
1da177e4
LT
5056 val64 = readq(&bar0->mac_cfg);
5057 sp->promisc_flg = 1;
776bd20f 5058 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
1da177e4
LT
5059 dev->name);
5060 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5061 /* Remove the NIC from promiscuous mode */
5062 add = &bar0->mac_cfg;
5063 val64 = readq(&bar0->mac_cfg);
5064 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5065
5066 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
d44570e4 5067 writel((u32)val64, add);
1da177e4
LT
5068 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5069 writel((u32) (val64 >> 32), (add + 4));
5070
926930b2
SS
5071 if (vlan_tag_strip != 0) {
5072 val64 = readq(&bar0->rx_pa_cfg);
5073 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5074 writeq(val64, &bar0->rx_pa_cfg);
cd0fce03 5075 sp->vlan_strip_flag = 1;
926930b2
SS
5076 }
5077
1da177e4
LT
5078 val64 = readq(&bar0->mac_cfg);
5079 sp->promisc_flg = 0;
9e39f7c5 5080 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
1da177e4
LT
5081 }
5082
5083 /* Update individual M_CAST address list */
4cd24eaf
JP
5084 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5085 if (netdev_mc_count(dev) >
faa4f796 5086 (config->max_mc_addr - config->max_mac_addr)) {
9e39f7c5
JP
5087 DBG_PRINT(ERR_DBG,
5088 "%s: No more Rx filters can be added - "
5089 "please enable ALL_MULTI instead\n",
1da177e4 5090 dev->name);
1da177e4
LT
5091 return;
5092 }
5093
5094 prev_cnt = sp->mc_addr_count;
4cd24eaf 5095 sp->mc_addr_count = netdev_mc_count(dev);
1da177e4
LT
5096
5097 /* Clear out the previous list of Mc in the H/W. */
5098 for (i = 0; i < prev_cnt; i++) {
5099 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5100 &bar0->rmac_addr_data0_mem);
5101 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
d44570e4 5102 &bar0->rmac_addr_data1_mem);
1da177e4 5103 val64 = RMAC_ADDR_CMD_MEM_WE |
d44570e4
JP
5104 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5105 RMAC_ADDR_CMD_MEM_OFFSET
5106 (config->mc_start_offset + i);
1da177e4
LT
5107 writeq(val64, &bar0->rmac_addr_cmd_mem);
5108
5109 /* Wait for command completes */
c92ca04b 5110 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5111 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5112 S2IO_BIT_RESET)) {
9e39f7c5
JP
5113 DBG_PRINT(ERR_DBG,
5114 "%s: Adding Multicasts failed\n",
5115 dev->name);
1da177e4
LT
5116 return;
5117 }
5118 }
5119
5120 /* Create the new Rx filter list and update the same in H/W. */
5508590c 5121 i = 0;
22bedad3 5122 netdev_for_each_mc_addr(ha, dev) {
a7a80d5a 5123 mac_addr = 0;
1da177e4 5124 for (j = 0; j < ETH_ALEN; j++) {
22bedad3 5125 mac_addr |= ha->addr[j];
1da177e4
LT
5126 mac_addr <<= 8;
5127 }
5128 mac_addr >>= 8;
5129 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5130 &bar0->rmac_addr_data0_mem);
5131 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
d44570e4 5132 &bar0->rmac_addr_data1_mem);
1da177e4 5133 val64 = RMAC_ADDR_CMD_MEM_WE |
d44570e4
JP
5134 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5135 RMAC_ADDR_CMD_MEM_OFFSET
5136 (i + config->mc_start_offset);
1da177e4
LT
5137 writeq(val64, &bar0->rmac_addr_cmd_mem);
5138
5139 /* Wait for command completes */
c92ca04b 5140 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5141 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5142 S2IO_BIT_RESET)) {
9e39f7c5
JP
5143 DBG_PRINT(ERR_DBG,
5144 "%s: Adding Multicasts failed\n",
5145 dev->name);
1da177e4
LT
5146 return;
5147 }
5508590c 5148 i++;
1da177e4
LT
5149 }
5150 }
5151}
5152
faa4f796
SH
5153/* read from CAM unicast & multicast addresses and store it in
5154 * def_mac_addr structure
5155 */
dac499f9 5156static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
faa4f796
SH
5157{
5158 int offset;
5159 u64 mac_addr = 0x0;
5160 struct config_param *config = &sp->config;
5161
5162 /* store unicast & multicast mac addresses */
5163 for (offset = 0; offset < config->max_mc_addr; offset++) {
5164 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5165 /* if read fails disable the entry */
5166 if (mac_addr == FAILURE)
5167 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5168 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5169 }
5170}
5171
5172/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5173static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5174{
5175 int offset;
5176 struct config_param *config = &sp->config;
5177 /* restore unicast mac address */
5178 for (offset = 0; offset < config->max_mac_addr; offset++)
5179 do_s2io_prog_unicast(sp->dev,
d44570e4 5180 sp->def_mac_addr[offset].mac_addr);
faa4f796
SH
5181
5182 /* restore multicast mac address */
5183 for (offset = config->mc_start_offset;
d44570e4 5184 offset < config->max_mc_addr; offset++)
faa4f796
SH
5185 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5186}
5187
5188/* add a multicast MAC address to CAM */
5189static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5190{
5191 int i;
5192 u64 mac_addr = 0;
5193 struct config_param *config = &sp->config;
5194
5195 for (i = 0; i < ETH_ALEN; i++) {
5196 mac_addr <<= 8;
5197 mac_addr |= addr[i];
5198 }
5199 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5200 return SUCCESS;
5201
5202 /* check if the multicast mac already preset in CAM */
5203 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5204 u64 tmp64;
5205 tmp64 = do_s2io_read_unicast_mc(sp, i);
5206 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5207 break;
5208
5209 if (tmp64 == mac_addr)
5210 return SUCCESS;
5211 }
5212 if (i == config->max_mc_addr) {
5213 DBG_PRINT(ERR_DBG,
d44570e4 5214 "CAM full no space left for multicast MAC\n");
faa4f796
SH
5215 return FAILURE;
5216 }
5217 /* Update the internal structure with this new mac address */
5218 do_s2io_copy_mac_addr(sp, i, mac_addr);
5219
d44570e4 5220 return do_s2io_add_mac(sp, mac_addr, i);
faa4f796
SH
5221}
5222
5223/* add MAC address to CAM */
5224static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
2fd37688
SS
5225{
5226 u64 val64;
5227 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5228
5229 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
d44570e4 5230 &bar0->rmac_addr_data0_mem);
2fd37688 5231
d44570e4 5232 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2fd37688
SS
5233 RMAC_ADDR_CMD_MEM_OFFSET(off);
5234 writeq(val64, &bar0->rmac_addr_cmd_mem);
5235
5236 /* Wait till command completes */
5237 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5238 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5239 S2IO_BIT_RESET)) {
faa4f796 5240 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
2fd37688
SS
5241 return FAILURE;
5242 }
5243 return SUCCESS;
5244}
faa4f796
SH
5245/* deletes a specified unicast/multicast mac entry from CAM */
5246static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5247{
5248 int offset;
5249 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5250 struct config_param *config = &sp->config;
5251
5252 for (offset = 1;
d44570e4 5253 offset < config->max_mc_addr; offset++) {
faa4f796
SH
5254 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5255 if (tmp64 == addr) {
5256 /* disable the entry by writing 0xffffffffffffULL */
5257 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5258 return FAILURE;
5259 /* store the new mac list from CAM */
5260 do_s2io_store_unicast_mc(sp);
5261 return SUCCESS;
5262 }
5263 }
5264 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
d44570e4 5265 (unsigned long long)addr);
faa4f796
SH
5266 return FAILURE;
5267}
5268
5269/* read mac entries from CAM */
5270static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5271{
5272 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5273 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5274
5275 /* read mac addr */
d44570e4 5276 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
faa4f796
SH
5277 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5278 writeq(val64, &bar0->rmac_addr_cmd_mem);
5279
5280 /* Wait till command completes */
5281 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
5282 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5283 S2IO_BIT_RESET)) {
faa4f796
SH
5284 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5285 return FAILURE;
5286 }
5287 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4
JP
5288
5289 return tmp64 >> 16;
faa4f796 5290}
2fd37688
SS
5291
5292/**
5293 * s2io_set_mac_addr driver entry point
5294 */
faa4f796 5295
2fd37688
SS
5296static int s2io_set_mac_addr(struct net_device *dev, void *p)
5297{
5298 struct sockaddr *addr = p;
5299
5300 if (!is_valid_ether_addr(addr->sa_data))
5301 return -EINVAL;
5302
5303 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5304
5305 /* store the MAC address in CAM */
d44570e4 5306 return do_s2io_prog_unicast(dev, dev->dev_addr);
2fd37688 5307}
1da177e4 5308/**
2fd37688 5309 * do_s2io_prog_unicast - Programs the Xframe mac address
1da177e4
LT
5310 * @dev : pointer to the device structure.
5311 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 5312 * Description : This procedure will program the Xframe to receive
1da177e4 5313 * frames with new Mac Address
20346722 5314 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
5315 * as defined in errno.h file on failure.
5316 */
faa4f796 5317
2fd37688 5318static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
1da177e4 5319{
4cf1653a 5320 struct s2io_nic *sp = netdev_priv(dev);
2fd37688 5321 register u64 mac_addr = 0, perm_addr = 0;
1da177e4 5322 int i;
faa4f796
SH
5323 u64 tmp64;
5324 struct config_param *config = &sp->config;
1da177e4 5325
20346722 5326 /*
d44570e4
JP
5327 * Set the new MAC address as the new unicast filter and reflect this
5328 * change on the device address registered with the OS. It will be
5329 * at offset 0.
5330 */
1da177e4
LT
5331 for (i = 0; i < ETH_ALEN; i++) {
5332 mac_addr <<= 8;
5333 mac_addr |= addr[i];
2fd37688
SS
5334 perm_addr <<= 8;
5335 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
d8d70caf
SS
5336 }
5337
2fd37688
SS
5338 /* check if the dev_addr is different than perm_addr */
5339 if (mac_addr == perm_addr)
d8d70caf
SS
5340 return SUCCESS;
5341
faa4f796
SH
5342 /* check if the mac already preset in CAM */
5343 for (i = 1; i < config->max_mac_addr; i++) {
5344 tmp64 = do_s2io_read_unicast_mc(sp, i);
5345 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5346 break;
5347
5348 if (tmp64 == mac_addr) {
5349 DBG_PRINT(INFO_DBG,
d44570e4
JP
5350 "MAC addr:0x%llx already present in CAM\n",
5351 (unsigned long long)mac_addr);
faa4f796
SH
5352 return SUCCESS;
5353 }
5354 }
5355 if (i == config->max_mac_addr) {
5356 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5357 return FAILURE;
5358 }
d8d70caf 5359 /* Update the internal structure with this new mac address */
faa4f796 5360 do_s2io_copy_mac_addr(sp, i, mac_addr);
d44570e4
JP
5361
5362 return do_s2io_add_mac(sp, mac_addr, i);
1da177e4
LT
5363}
5364
5365/**
20346722 5366 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
5367 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5368 * @info: pointer to the structure with parameters given by ethtool to set
5369 * link information.
5370 * Description:
20346722 5371 * The function sets different link parameters provided by the user onto
1da177e4
LT
5372 * the NIC.
5373 * Return value:
5374 * 0 on success.
d44570e4 5375 */
1da177e4
LT
5376
5377static int s2io_ethtool_sset(struct net_device *dev,
5378 struct ethtool_cmd *info)
5379{
4cf1653a 5380 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5381 if ((info->autoneg == AUTONEG_ENABLE) ||
d44570e4
JP
5382 (info->speed != SPEED_10000) ||
5383 (info->duplex != DUPLEX_FULL))
1da177e4
LT
5384 return -EINVAL;
5385 else {
5386 s2io_close(sp->dev);
5387 s2io_open(sp->dev);
5388 }
5389
5390 return 0;
5391}
5392
5393/**
20346722 5394 * s2io_ethtol_gset - Return link specific information.
1da177e4
LT
5395 * @sp : private member of the device structure, pointer to the
5396 * s2io_nic structure.
5397 * @info : pointer to the structure with parameters given by ethtool
5398 * to return link information.
5399 * Description:
5400 * Returns link specific information like speed, duplex etc.. to ethtool.
5401 * Return value :
5402 * return 0 on success.
5403 */
5404
5405static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5406{
4cf1653a 5407 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5408 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5409 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5410 info->port = PORT_FIBRE;
1a7eb72b
SS
5411
5412 /* info->transceiver */
5413 info->transceiver = XCVR_EXTERNAL;
1da177e4
LT
5414
5415 if (netif_carrier_ok(sp->dev)) {
5416 info->speed = 10000;
5417 info->duplex = DUPLEX_FULL;
5418 } else {
5419 info->speed = -1;
5420 info->duplex = -1;
5421 }
5422
5423 info->autoneg = AUTONEG_DISABLE;
5424 return 0;
5425}
5426
5427/**
20346722
K
5428 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5429 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5430 * s2io_nic structure.
5431 * @info : pointer to the structure with parameters given by ethtool to
5432 * return driver information.
5433 * Description:
5434 * Returns driver specefic information like name, version etc.. to ethtool.
5435 * Return value:
5436 * void
5437 */
5438
5439static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5440 struct ethtool_drvinfo *info)
5441{
4cf1653a 5442 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 5443
dbc2309d
JL
5444 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5445 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5446 strncpy(info->fw_version, "", sizeof(info->fw_version));
5447 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5448 info->regdump_len = XENA_REG_SPACE;
5449 info->eedump_len = XENA_EEPROM_SPACE;
1da177e4
LT
5450}
5451
5452/**
5453 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 5454 * @sp: private member of the device structure, which is a pointer to the
1da177e4 5455 * s2io_nic structure.
20346722 5456 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
5457 * dumping the registers.
5458 * @reg_space: The input argumnet into which all the registers are dumped.
5459 * Description:
5460 * Dumps the entire register space of xFrame NIC into the user given
5461 * buffer area.
5462 * Return value :
5463 * void .
d44570e4 5464 */
1da177e4
LT
5465
5466static void s2io_ethtool_gregs(struct net_device *dev,
5467 struct ethtool_regs *regs, void *space)
5468{
5469 int i;
5470 u64 reg;
d44570e4 5471 u8 *reg_space = (u8 *)space;
4cf1653a 5472 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
5473
5474 regs->len = XENA_REG_SPACE;
5475 regs->version = sp->pdev->subsystem_device;
5476
5477 for (i = 0; i < regs->len; i += 8) {
5478 reg = readq(sp->bar0 + i);
5479 memcpy((reg_space + i), &reg, 8);
5480 }
5481}
5482
5483/**
5484 * s2io_phy_id - timer function that alternates adapter LED.
20346722 5485 * @data : address of the private member of the device structure, which
1da177e4 5486 * is a pointer to the s2io_nic structure, provided as an u32.
20346722
K
5487 * Description: This is actually the timer function that alternates the
5488 * adapter LED bit of the adapter control bit to set/reset every time on
5489 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
1da177e4 5490 * once every second.
d44570e4 5491 */
1da177e4
LT
static void s2io_phy_id(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *)data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/*
	 * Xframe II, and Xframe I cards with subsystem id low byte >= 0x07,
	 * drive the identification LED through GPIO 0; older Xframe I cards
	 * use the LED bit in the adapter control register instead.
	 */
	if ((sp->device_type == XFRAME_II_DEVICE) ||
	    ((subid & 0xFF) >= 0x07)) {
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;	/* flip the LED state */
		writeq(val64, &bar0->gpio_control);
	} else {
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;	/* flip the LED state */
		writeq(val64, &bar0->adapter_control);
	}

	/* Re-arm the timer so the LED keeps toggling every half second. */
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
5513
5514/**
5515 * s2io_ethtool_idnic - To physically identify the nic on the system.
5516 * @sp : private member of the device structure, which is a pointer to the
5517 * s2io_nic structure.
20346722 5518 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
5519 * ethtool.
5520 * Description: Used to physically identify the NIC on the system.
20346722 5521 * The Link LED will blink for a time specified by the user for
1da177e4 5522 * identification.
20346722 5523 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
5524 * identification is possible only if it's link is up.
5525 * Return value:
5526 * int , returns 0 on success
5527 */
5528
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Remember the GPIO state so it can be restored after blinking. */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/*
	 * Old Xframe I cards blink via the adapter control register, which
	 * only works when the adapter is enabled (i.e. link up).
	 */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily set up the blink timer on first use. */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long)sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Blink for the user-requested number of seconds (0 = default). */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the original GPIO state on cards that need it. */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5564
0cec35eb 5565static void s2io_ethtool_gringparam(struct net_device *dev,
d44570e4 5566 struct ethtool_ringparam *ering)
0cec35eb 5567{
4cf1653a 5568 struct s2io_nic *sp = netdev_priv(dev);
d44570e4 5569 int i, tx_desc_count = 0, rx_desc_count = 0;
0cec35eb
SH
5570
5571 if (sp->rxd_mode == RXD_MODE_1)
5572 ering->rx_max_pending = MAX_RX_DESC_1;
5573 else if (sp->rxd_mode == RXD_MODE_3B)
5574 ering->rx_max_pending = MAX_RX_DESC_2;
0cec35eb
SH
5575
5576 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5577 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
0cec35eb 5578 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
8a4bdbaa 5579
9e39f7c5 5580 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
0cec35eb
SH
5581 ering->tx_pending = tx_desc_count;
5582 rx_desc_count = 0;
8a4bdbaa 5583 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
0cec35eb 5584 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
b6627672 5585
0cec35eb
SH
5586 ering->rx_pending = rx_desc_count;
5587
5588 ering->rx_mini_max_pending = 0;
5589 ering->rx_mini_pending = 0;
d44570e4 5590 if (sp->rxd_mode == RXD_MODE_1)
0cec35eb
SH
5591 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5592 else if (sp->rxd_mode == RXD_MODE_3B)
5593 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5594 ering->rx_jumbo_pending = rx_desc_count;
5595}
5596
1da177e4
LT
5597/**
5598 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
20346722
K
5599 * @sp : private member of the device structure, which is a pointer to the
5600 * s2io_nic structure.
1da177e4
LT
5601 * @ep : pointer to the structure with pause parameters given by ethtool.
5602 * Description:
5603 * Returns the Pause frame generation and reception capability of the NIC.
5604 * Return value:
5605 * void
5606 */
5607static void s2io_ethtool_getpause_data(struct net_device *dev,
5608 struct ethtool_pauseparam *ep)
5609{
5610 u64 val64;
4cf1653a 5611 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5612 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5613
5614 val64 = readq(&bar0->rmac_pause_cfg);
5615 if (val64 & RMAC_PAUSE_GEN_ENABLE)
f957bcf0 5616 ep->tx_pause = true;
1da177e4 5617 if (val64 & RMAC_PAUSE_RX_ENABLE)
f957bcf0
TK
5618 ep->rx_pause = true;
5619 ep->autoneg = false;
1da177e4
LT
5620}
5621
5622/**
5623 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5624 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5625 * s2io_nic structure.
5626 * @ep : pointer to the structure with pause parameters given by ethtool.
5627 * Description:
5628 * It can be used to set or reset Pause frame generation or reception
5629 * support of the NIC.
5630 * Return value:
5631 * int, returns 0 on Success
5632 */
5633
5634static int s2io_ethtool_setpause_data(struct net_device *dev,
d44570e4 5635 struct ethtool_pauseparam *ep)
1da177e4
LT
5636{
5637 u64 val64;
4cf1653a 5638 struct s2io_nic *sp = netdev_priv(dev);
1ee6dd77 5639 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5640
5641 val64 = readq(&bar0->rmac_pause_cfg);
5642 if (ep->tx_pause)
5643 val64 |= RMAC_PAUSE_GEN_ENABLE;
5644 else
5645 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5646 if (ep->rx_pause)
5647 val64 |= RMAC_PAUSE_RX_ENABLE;
5648 else
5649 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5650 writeq(val64, &bar0->rmac_pause_cfg);
5651 return 0;
5652}
5653
5654/**
5655 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 5656 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5657 * s2io_nic structure.
5658 * @off : offset at which the data must be written
5659 * @data : Its an output parameter where the data read at the given
20346722 5660 * offset is stored.
1da177e4 5661 * Description:
20346722 5662 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
5663 * read data.
5664 * NOTE: Will allow to read only part of the EEPROM visible through the
5665 * I2C bus.
5666 * Return value:
5667 * -1 on failure and 0 on success.
5668 */
5669
#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: the EEPROM sits behind the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Kick off a 3-byte read at the requested offset. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion, up to 5 x 50 ms. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: the EEPROM is accessed over SPI instead. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Program the command, then set REQ in a second write. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device rejected the command. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* only 3 bytes valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5722
5723/**
5724 * write_eeprom - actually writes the relevant part of the data value.
5725 * @sp : private member of the device structure, which is a pointer to the
5726 * s2io_nic structure.
5727 * @off : offset at which the data must be written
5728 * @data : The data that is to be written
20346722 5729 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
5730 * the Eeprom. (max of 3)
5731 * Description:
5732 * Actually writes the relevant part of the data value into the Eeprom
5733 * through the I2C bus.
5734 * Return value:
5735 * 0 on success, -1 on failure.
5736 */
5737
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write through the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Start a cnt-byte write of the low 32 bits of data. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion; NACK means the write failed. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write through the SPI controller. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* A byte count of 8 is encoded as 0 in the SPI register. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		/* Program the command, then set REQ in a second write. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* Device rejected the command. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/*
 * s2io_vpd_read - read the PCI VPD area to get the product name and
 * serial number of the adapter.  Falls back to a default product name
 * and "NOT AVAILABLE" serial number if the VPD read fails.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;	/* VPD capability offset in config space */
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/*
	 * Read 256 bytes of VPD, 4 at a time: write the VPD address with
	 * the flag bit clear, then poll the flag byte until the hardware
	 * sets 0x80 to signal the dword is ready.
	 */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter: scan for the "SN" tag */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				/* copy only if it fits in both buffers */
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* Product name string: length byte at [1], text starting at [3]. */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5859
1da177e4
LT
5860/**
5861 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5862 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 5863 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5864 * containing all relevant information.
5865 * @data_buf : user defined value to be written into Eeprom.
5866 * Description: Reads the values stored in the Eeprom at given offset
5867 * for a given length. Stores these values int the input argument data
5868 * buffer 'data_buf' and returns these to the caller (ethtool.)
5869 * Return value:
5870 * int 0 on success
5871 */
5872
static int s2io_ethtool_geeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 * data_buf)
{
	u32 i, valid;
	u64 data;
	struct s2io_nic *sp = netdev_priv(dev);

	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

	/* Clamp the request to the visible EEPROM window. */
	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

	/*
	 * NOTE(review): copies in fixed 4-byte chunks, so a len that is not
	 * a multiple of 4 writes up to 3 bytes past the requested length —
	 * presumably the ethtool core buffer is large enough; verify.
	 */
	for (i = 0; i < eeprom->len; i += 4) {
		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
			return -EFAULT;
		}
		valid = INV(data);	/* byte-swap into user ordering */
		memcpy((data_buf + i), &valid, 4);
	}
	return 0;
}
5895
5896/**
5897 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5898 * @sp : private member of the device structure, which is a pointer to the
5899 * s2io_nic structure.
20346722 5900 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5901 * containing all relevant information.
5902 * @data_buf ; user defined value to be written into Eeprom.
5903 * Description:
5904 * Tries to write the user provided value in the Eeprom, at the offset
5905 * given by the user.
5906 * Return value:
5907 * 0 on success, -EFAULT on failure.
5908 */
5909
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 *data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = netdev_priv(dev);

	/* The magic must match this adapter's vendor/device id pair. */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  (sp->pdev->vendor | (sp->pdev->device << 16)),
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write one byte per iteration via write_eeprom(). */
	while (len) {
		data = (u32)data_buf[cnt] & 0x000000FF;
		/*
		 * Non-zero bytes are shifted into the top byte of the
		 * 32-bit word expected by the EEPROM write logic.
		 */
		if (data)
			valid = (u32)(data << 24);
		else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: "
				  "Cannot write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5946
5947/**
20346722
K
5948 * s2io_register_test - reads and writes into all clock domains.
5949 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5950 * s2io_nic structure.
5951 * @data : variable that returns the result of each of the test conducted b
5952 * by the driver.
5953 * Description:
5954 * Read and write into all clock domains. The NIC has 3 clock domains,
5955 * see that registers in all the three regions are accessible.
5956 * Return value:
5957 * 0 on success.
5958 */
5959
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read tests: each register has a known reset/identity value. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* The expected queue configuration differs between device types. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write tests: write alternating bit patterns and read them back. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	*data = fail;
	return fail;
}
6013
6014/**
20346722 6015 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
6016 * @sp : private member of the device structure, which is a pointer to the
6017 * s2io_nic structure.
6018 * @data:variable that returns the result of each of the test conducted by
6019 * the driver.
6020 * Description:
20346722 6021 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
6022 * register.
6023 * Return value:
6024 * 0 on success.
6025 */
6026
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;	/* write succeeded where it must not */

	/* Save current values at offsets 0x4F0 and 0x7F0 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0: write a pattern and read it back. */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Negative tests: these offsets must reject writes. */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6114
6115/**
6116 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6117 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6118 * s2io_nic structure.
20346722 6119 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6120 * the driver.
6121 * Description:
6122 * This invokes the MemBist test of the card. We give around
6123 * 2 secs time for the Test to complete. If it's still not complete
20346722 6124 * within this peiod, we consider that the test failed.
1da177e4
LT
6125 * Return value:
6126 * 0 on success and -1 on failure.
6127 */
6128
d44570e4 6129static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
1da177e4
LT
6130{
6131 u8 bist = 0;
6132 int cnt = 0, ret = -1;
6133
6134 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6135 bist |= PCI_BIST_START;
6136 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6137
6138 while (cnt < 20) {
6139 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6140 if (!(bist & PCI_BIST_START)) {
6141 *data = (bist & PCI_BIST_CODE_MASK);
6142 ret = 0;
6143 break;
6144 }
6145 msleep(100);
6146 cnt++;
6147 }
6148
6149 return ret;
6150}
6151
6152/**
20346722
K
6153 * s2io-link_test - verifies the link state of the nic
6154 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
6155 * s2io_nic structure.
6156 * @data: variable that returns the result of each of the test conducted by
6157 * the driver.
6158 * Description:
20346722 6159 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6160 * argument 'data' appropriately.
6161 * Return value:
6162 * 0 on success.
6163 */
6164
d44570e4 6165static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
1da177e4 6166{
1ee6dd77 6167 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6168 u64 val64;
6169
6170 val64 = readq(&bar0->adapter_status);
d44570e4 6171 if (!(LINK_IS_UP(val64)))
1da177e4 6172 *data = 1;
c92ca04b
AR
6173 else
6174 *data = 0;
1da177e4 6175
b41477f3 6176 return *data;
1da177e4
LT
6177}
6178
6179/**
20346722
K
6180 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6181 * @sp - private member of the device structure, which is a pointer to the
1da177e4 6182 * s2io_nic structure.
20346722 6183 * @data - variable that returns the result of each of the test
1da177e4
LT
6184 * conducted by the driver.
6185 * Description:
20346722 6186 * This is one of the offline test that tests the read and write
1da177e4
LT
6187 * access to the RldRam chip on the NIC.
6188 * Return value:
6189 * 0 on success.
6190 */
6191
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the raw test patterns are not corrected. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the RLDRAM controller into test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/*
	 * Two passes: the second inverts the upper 48 bits of each pattern
	 * so every data line is exercised both high and low.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll for completion. */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;	/* write phase timed out */

		/* Kick off the read-back/compare phase. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;	/* read phase timed out */

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6274
6275/**
6276 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6277 * @sp : private member of the device structure, which is a pointer to the
6278 * s2io_nic structure.
6279 * @ethtest : pointer to a ethtool command specific structure that will be
6280 * returned to the user.
20346722 6281 * @data : variable that returns the result of each of the test
1da177e4
LT
6282 * conducted by the driver.
6283 * Description:
6284 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6285 * the health of the card.
6286 * Return value:
6287 * void
6288 */
6289
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests: the interface must be down while the
		 * register/rldram/eeprom/bist tests disturb the hardware,
		 * and the adapter is reset between the destructive ones. */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the interface state the caller had. */
		if (orig_state)
			s2io_open(sp->dev);

		data[2] = 0;	/* link test not run in offline mode */
	} else {
		/* Online Tests: only the link test, and only when up. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6343
/*
 * s2io_get_ethtool_stats - fill the ethtool -S statistics array.
 * The slot order here must match ethtool_xena_stats_keys,
 * ethtool_enhanced_stats_keys and ethtool_driver_stats_keys exactly
 * (see s2io_ethtool_get_strings); do not reorder entries.
 * Hardware counters live in the little-endian stat_block; 32-bit
 * counters with a separate overflow word are combined into one u64.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Refresh the DMA'd hardware statistics block before reading it. */
	s2io_updt_stats(sp);

	/* --- TMAC (transmit MAC) hardware counters --- */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);

	/* --- RMAC (receive MAC) hardware counters --- */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	/* Per-receive-queue frame and ring-full counters. */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);

	/* --- PCI/PCI-X bus transaction counters --- */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules (Xframe II). */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* --- Driver-maintained software statistics --- */
	tmp_stats[i++] = 0;	/* placeholder slot; always zero */
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	/* XPAK transceiver alarm/warning counters. */
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6620
ac1f60db 6621static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4 6622{
d44570e4 6623 return XENA_REG_SPACE;
1da177e4
LT
6624}
6625
6626
d44570e4 6627static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
1da177e4 6628{
4cf1653a 6629 struct s2io_nic *sp = netdev_priv(dev);
1da177e4 6630
d44570e4 6631 return sp->rx_csum;
1da177e4 6632}
ac1f60db
AB
6633
6634static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 6635{
4cf1653a 6636 struct s2io_nic *sp = netdev_priv(dev);
1da177e4
LT
6637
6638 if (data)
6639 sp->rx_csum = 1;
6640 else
6641 sp->rx_csum = 0;
6642
6643 return 0;
6644}
ac1f60db
AB
6645
6646static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4 6647{
d44570e4 6648 return XENA_EEPROM_SPACE;
1da177e4
LT
6649}
6650
b9f2c044 6651static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6652{
4cf1653a 6653 struct s2io_nic *sp = netdev_priv(dev);
b9f2c044
JG
6654
6655 switch (sset) {
6656 case ETH_SS_TEST:
6657 return S2IO_TEST_LEN;
6658 case ETH_SS_STATS:
d44570e4 6659 switch (sp->device_type) {
b9f2c044
JG
6660 case XFRAME_I_DEVICE:
6661 return XFRAME_I_STAT_LEN;
6662 case XFRAME_II_DEVICE:
6663 return XFRAME_II_STAT_LEN;
6664 default:
6665 return 0;
6666 }
6667 default:
6668 return -EOPNOTSUPP;
6669 }
1da177e4 6670}
ac1f60db
AB
6671
6672static void s2io_ethtool_get_strings(struct net_device *dev,
d44570e4 6673 u32 stringset, u8 *data)
1da177e4 6674{
fa1f0cb3 6675 int stat_size = 0;
4cf1653a 6676 struct s2io_nic *sp = netdev_priv(dev);
fa1f0cb3 6677
1da177e4
LT
6678 switch (stringset) {
6679 case ETH_SS_TEST:
6680 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6681 break;
6682 case ETH_SS_STATS:
fa1f0cb3 6683 stat_size = sizeof(ethtool_xena_stats_keys);
d44570e4
JP
6684 memcpy(data, &ethtool_xena_stats_keys, stat_size);
6685 if (sp->device_type == XFRAME_II_DEVICE) {
fa1f0cb3 6686 memcpy(data + stat_size,
d44570e4
JP
6687 &ethtool_enhanced_stats_keys,
6688 sizeof(ethtool_enhanced_stats_keys));
fa1f0cb3
SS
6689 stat_size += sizeof(ethtool_enhanced_stats_keys);
6690 }
6691
6692 memcpy(data + stat_size, &ethtool_driver_stats_keys,
d44570e4 6693 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6694 }
6695}
1da177e4 6696
ac1f60db 6697static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
6698{
6699 if (data)
6700 dev->features |= NETIF_F_IP_CSUM;
6701 else
6702 dev->features &= ~NETIF_F_IP_CSUM;
6703
6704 return 0;
6705}
6706
75c30b13
AR
6707static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6708{
6709 return (dev->features & NETIF_F_TSO) != 0;
6710}
958de193 6711
75c30b13
AR
6712static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6713{
6714 if (data)
6715 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6716 else
6717 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6718
6719 return 0;
6720}
1da177e4 6721
958de193
JM
/*
 * s2io_ethtool_set_flags - handle ethtool feature-flag changes.
 * Only ETH_FLAG_LRO may be toggled; any other bit is rejected.
 * If the LRO state actually changes while the interface is running,
 * the card is brought down and back up so the new setting takes effect.
 */
static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int rc = 0;
	int changed = 0;

	/* Reject any flag other than LRO. */
	if (data & ~ETH_FLAG_LRO)
		return -EINVAL;

	if (data & ETH_FLAG_LRO) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			changed = 1;
		}
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		changed = 1;
	}

	/* Restart the card only when the setting really changed. */
	if (changed && netif_running(dev)) {
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		rc = s2io_card_up(sp);
		if (rc)
			/* Bring-up failed: reset to leave HW in a sane state. */
			s2io_reset(sp);
		else
			s2io_start_all_tx_queue(sp);
	}

	return rc;
}
6753
7282d491 6754static const struct ethtool_ops netdev_ethtool_ops = {
1da177e4
LT
6755 .get_settings = s2io_ethtool_gset,
6756 .set_settings = s2io_ethtool_sset,
6757 .get_drvinfo = s2io_ethtool_gdrvinfo,
6758 .get_regs_len = s2io_ethtool_get_regs_len,
6759 .get_regs = s2io_ethtool_gregs,
6760 .get_link = ethtool_op_get_link,
6761 .get_eeprom_len = s2io_get_eeprom_len,
6762 .get_eeprom = s2io_ethtool_geeprom,
6763 .set_eeprom = s2io_ethtool_seeprom,
0cec35eb 6764 .get_ringparam = s2io_ethtool_gringparam,
1da177e4
LT
6765 .get_pauseparam = s2io_ethtool_getpause_data,
6766 .set_pauseparam = s2io_ethtool_setpause_data,
6767 .get_rx_csum = s2io_ethtool_get_rx_csum,
6768 .set_rx_csum = s2io_ethtool_set_rx_csum,
1da177e4 6769 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
958de193
JM
6770 .set_flags = s2io_ethtool_set_flags,
6771 .get_flags = ethtool_op_get_flags,
1da177e4 6772 .set_sg = ethtool_op_set_sg,
75c30b13
AR
6773 .get_tso = s2io_ethtool_op_get_tso,
6774 .set_tso = s2io_ethtool_op_set_tso,
fed5eccd 6775 .set_ufo = ethtool_op_set_ufo,
1da177e4
LT
6776 .self_test = s2io_ethtool_test,
6777 .get_strings = s2io_ethtool_get_strings,
6778 .phys_id = s2io_ethtool_idnic,
b9f2c044
JG
6779 .get_ethtool_stats = s2io_get_ethtool_stats,
6780 .get_sset_count = s2io_get_sset_count,
1da177e4
LT
6781};
6782
6783/**
20346722 6784 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
6785 * @dev : Device pointer.
6786 * @ifr : An IOCTL specific structure, that can contain a pointer to
6787 * a proprietary structure used to pass information to the driver.
6788 * @cmd : This is used to distinguish between the different commands that
6789 * can be passed to the IOCTL functions.
6790 * Description:
20346722
K
6791 * Currently there are no special functionality supported in IOCTL, hence
6792 * function always return EOPNOTSUPPORTED
1da177e4
LT
6793 */
6794
ac1f60db 6795static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
6796{
6797 return -EOPNOTSUPP;
6798}
6799
6800/**
6801 * s2io_change_mtu - entry point to change MTU size for the device.
6802 * @dev : device pointer.
6803 * @new_mtu : the new MTU size for the device.
6804 * Description: A driver entry point to change MTU size for the device.
6805 * Before changing the MTU the device must be stopped.
6806 * Return value:
6807 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6808 * file on failure.
6809 */
6810
ac1f60db 6811static int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 6812{
4cf1653a 6813 struct s2io_nic *sp = netdev_priv(dev);
9f74ffde 6814 int ret = 0;
1da177e4
LT
6815
6816 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
d44570e4 6817 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
1da177e4
LT
6818 return -EPERM;
6819 }
6820
1da177e4 6821 dev->mtu = new_mtu;
d8892c6e 6822 if (netif_running(dev)) {
3a3d5756 6823 s2io_stop_all_tx_queue(sp);
e6a8fee2 6824 s2io_card_down(sp);
9f74ffde
SH
6825 ret = s2io_card_up(sp);
6826 if (ret) {
d8892c6e 6827 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
b39d66a8 6828 __func__);
9f74ffde 6829 return ret;
d8892c6e 6830 }
3a3d5756 6831 s2io_wake_all_tx_queue(sp);
d8892c6e 6832 } else { /* Device is down */
1ee6dd77 6833 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d8892c6e
K
6834 u64 val64 = new_mtu;
6835
6836 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6837 }
1da177e4 6838
9f74ffde 6839 return ret;
1da177e4
LT
6840}
6841
1da177e4
LT
6842/**
6843 * s2io_set_link - Set the LInk status
6844 * @data: long pointer to device private structue
6845 * Description: Sets the link status for the adapter
6846 */
6847
c4028958 6848static void s2io_set_link(struct work_struct *work)
1da177e4 6849{
d44570e4
JP
6850 struct s2io_nic *nic = container_of(work, struct s2io_nic,
6851 set_link_task);
1da177e4 6852 struct net_device *dev = nic->dev;
1ee6dd77 6853 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
6854 register u64 val64;
6855 u16 subid;
6856
22747d6b
FR
6857 rtnl_lock();
6858
6859 if (!netif_running(dev))
6860 goto out_unlock;
6861
92b84437 6862 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
1da177e4 6863 /* The card is being reset, no point doing anything */
22747d6b 6864 goto out_unlock;
1da177e4
LT
6865 }
6866
6867 subid = nic->pdev->subsystem_device;
a371a07d
K
6868 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6869 /*
6870 * Allow a small delay for the NICs self initiated
6871 * cleanup to complete.
6872 */
6873 msleep(100);
6874 }
1da177e4
LT
6875
6876 val64 = readq(&bar0->adapter_status);
19a60522
SS
6877 if (LINK_IS_UP(val64)) {
6878 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6879 if (verify_xena_quiescence(nic)) {
6880 val64 = readq(&bar0->adapter_control);
6881 val64 |= ADAPTER_CNTL_EN;
1da177e4 6882 writeq(val64, &bar0->adapter_control);
19a60522 6883 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
d44570e4 6884 nic->device_type, subid)) {
19a60522
SS
6885 val64 = readq(&bar0->gpio_control);
6886 val64 |= GPIO_CTRL_GPIO_0;
6887 writeq(val64, &bar0->gpio_control);
6888 val64 = readq(&bar0->gpio_control);
6889 } else {
6890 val64 |= ADAPTER_LED_ON;
6891 writeq(val64, &bar0->adapter_control);
a371a07d 6892 }
f957bcf0 6893 nic->device_enabled_once = true;
19a60522 6894 } else {
9e39f7c5
JP
6895 DBG_PRINT(ERR_DBG,
6896 "%s: Error: device is not Quiescent\n",
6897 dev->name);
3a3d5756 6898 s2io_stop_all_tx_queue(nic);
1da177e4 6899 }
19a60522 6900 }
92c48799
SS
6901 val64 = readq(&bar0->adapter_control);
6902 val64 |= ADAPTER_LED_ON;
6903 writeq(val64, &bar0->adapter_control);
6904 s2io_link(nic, LINK_UP);
19a60522
SS
6905 } else {
6906 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6907 subid)) {
6908 val64 = readq(&bar0->gpio_control);
6909 val64 &= ~GPIO_CTRL_GPIO_0;
6910 writeq(val64, &bar0->gpio_control);
6911 val64 = readq(&bar0->gpio_control);
1da177e4 6912 }
92c48799
SS
6913 /* turn off LED */
6914 val64 = readq(&bar0->adapter_control);
d44570e4 6915 val64 = val64 & (~ADAPTER_LED_ON);
92c48799 6916 writeq(val64, &bar0->adapter_control);
19a60522 6917 s2io_link(nic, LINK_DOWN);
1da177e4 6918 }
92b84437 6919 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
22747d6b
FR
6920
6921out_unlock:
d8d70caf 6922 rtnl_unlock();
1da177e4
LT
6923}
6924
1ee6dd77 6925static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
d44570e4
JP
6926 struct buffAdd *ba,
6927 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6928 u64 *temp2, int size)
5d3213cc
AR
6929{
6930 struct net_device *dev = sp->dev;
491abf25 6931 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
5d3213cc
AR
6932
6933 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6d517a27 6934 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
5d3213cc
AR
6935 /* allocate skb */
6936 if (*skb) {
6937 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6938 /*
6939 * As Rx frame are not going to be processed,
6940 * using same mapped address for the Rxd
6941 * buffer pointer
6942 */
6d517a27 6943 rxdp1->Buffer0_ptr = *temp0;
5d3213cc
AR
6944 } else {
6945 *skb = dev_alloc_skb(size);
6946 if (!(*skb)) {
9e39f7c5
JP
6947 DBG_PRINT(INFO_DBG,
6948 "%s: Out of memory to allocate %s\n",
6949 dev->name, "1 buf mode SKBs");
ffb5df6c 6950 stats->mem_alloc_fail_cnt++;
5d3213cc
AR
6951 return -ENOMEM ;
6952 }
ffb5df6c 6953 stats->mem_allocated += (*skb)->truesize;
5d3213cc
AR
6954 /* storing the mapped addr in a temp variable
6955 * such it will be used for next rxd whose
6956 * Host Control is NULL
6957 */
6d517a27 6958 rxdp1->Buffer0_ptr = *temp0 =
d44570e4
JP
6959 pci_map_single(sp->pdev, (*skb)->data,
6960 size - NET_IP_ALIGN,
6961 PCI_DMA_FROMDEVICE);
8d8bb39b 6962 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
491abf25 6963 goto memalloc_failed;
5d3213cc
AR
6964 rxdp->Host_Control = (unsigned long) (*skb);
6965 }
6966 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6d517a27 6967 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
5d3213cc
AR
6968 /* Two buffer Mode */
6969 if (*skb) {
6d517a27
VP
6970 rxdp3->Buffer2_ptr = *temp2;
6971 rxdp3->Buffer0_ptr = *temp0;
6972 rxdp3->Buffer1_ptr = *temp1;
5d3213cc
AR
6973 } else {
6974 *skb = dev_alloc_skb(size);
2ceaac75 6975 if (!(*skb)) {
9e39f7c5
JP
6976 DBG_PRINT(INFO_DBG,
6977 "%s: Out of memory to allocate %s\n",
6978 dev->name,
6979 "2 buf mode SKBs");
ffb5df6c 6980 stats->mem_alloc_fail_cnt++;
2ceaac75
DR
6981 return -ENOMEM;
6982 }
ffb5df6c 6983 stats->mem_allocated += (*skb)->truesize;
6d517a27 6984 rxdp3->Buffer2_ptr = *temp2 =
5d3213cc
AR
6985 pci_map_single(sp->pdev, (*skb)->data,
6986 dev->mtu + 4,
6987 PCI_DMA_FROMDEVICE);
8d8bb39b 6988 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
491abf25 6989 goto memalloc_failed;
6d517a27 6990 rxdp3->Buffer0_ptr = *temp0 =
d44570e4
JP
6991 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6992 PCI_DMA_FROMDEVICE);
8d8bb39b 6993 if (pci_dma_mapping_error(sp->pdev,
d44570e4
JP
6994 rxdp3->Buffer0_ptr)) {
6995 pci_unmap_single(sp->pdev,
6996 (dma_addr_t)rxdp3->Buffer2_ptr,
6997 dev->mtu + 4,
6998 PCI_DMA_FROMDEVICE);
491abf25
VP
6999 goto memalloc_failed;
7000 }
5d3213cc
AR
7001 rxdp->Host_Control = (unsigned long) (*skb);
7002
7003 /* Buffer-1 will be dummy buffer not used */
6d517a27 7004 rxdp3->Buffer1_ptr = *temp1 =
5d3213cc 7005 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
d44570e4 7006 PCI_DMA_FROMDEVICE);
8d8bb39b 7007 if (pci_dma_mapping_error(sp->pdev,
d44570e4
JP
7008 rxdp3->Buffer1_ptr)) {
7009 pci_unmap_single(sp->pdev,
7010 (dma_addr_t)rxdp3->Buffer0_ptr,
7011 BUF0_LEN, PCI_DMA_FROMDEVICE);
7012 pci_unmap_single(sp->pdev,
7013 (dma_addr_t)rxdp3->Buffer2_ptr,
7014 dev->mtu + 4,
7015 PCI_DMA_FROMDEVICE);
491abf25
VP
7016 goto memalloc_failed;
7017 }
5d3213cc
AR
7018 }
7019 }
7020 return 0;
d44570e4
JP
7021
7022memalloc_failed:
7023 stats->pci_map_fail_cnt++;
7024 stats->mem_freed += (*skb)->truesize;
7025 dev_kfree_skb(*skb);
7026 return -ENOMEM;
5d3213cc 7027}
491abf25 7028
1ee6dd77
RB
7029static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
7030 int size)
5d3213cc
AR
7031{
7032 struct net_device *dev = sp->dev;
7033 if (sp->rxd_mode == RXD_MODE_1) {
d44570e4 7034 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
5d3213cc
AR
7035 } else if (sp->rxd_mode == RXD_MODE_3B) {
7036 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
7037 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
d44570e4 7038 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
5d3213cc
AR
7039 }
7040}
7041
1ee6dd77 7042static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
7043{
7044 int i, j, k, blk_cnt = 0, size;
5d3213cc 7045 struct config_param *config = &sp->config;
ffb5df6c 7046 struct mac_info *mac_control = &sp->mac_control;
5d3213cc 7047 struct net_device *dev = sp->dev;
1ee6dd77 7048 struct RxD_t *rxdp = NULL;
5d3213cc 7049 struct sk_buff *skb = NULL;
1ee6dd77 7050 struct buffAdd *ba = NULL;
5d3213cc
AR
7051 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
7052
7053 /* Calculate the size based on ring mode */
7054 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
7055 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
7056 if (sp->rxd_mode == RXD_MODE_1)
7057 size += NET_IP_ALIGN;
7058 else if (sp->rxd_mode == RXD_MODE_3B)
7059 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
5d3213cc
AR
7060
7061 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7062 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7063 struct ring_info *ring = &mac_control->rings[i];
7064
d44570e4 7065 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
5d3213cc
AR
7066
7067 for (j = 0; j < blk_cnt; j++) {
7068 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
d44570e4
JP
7069 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
7070 if (sp->rxd_mode == RXD_MODE_3B)
13d866a9 7071 ba = &ring->ba[j][k];
d44570e4
JP
7072 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
7073 (u64 *)&temp0_64,
7074 (u64 *)&temp1_64,
7075 (u64 *)&temp2_64,
7076 size) == -ENOMEM) {
ac1f90d6
SS
7077 return 0;
7078 }
5d3213cc
AR
7079
7080 set_rxd_buffer_size(sp, rxdp, size);
7081 wmb();
7082 /* flip the Ownership bit to Hardware */
7083 rxdp->Control_1 |= RXD_OWN_XENA;
7084 }
7085 }
7086 }
7087 return 0;
7088
7089}
7090
d44570e4 7091static int s2io_add_isr(struct s2io_nic *sp)
1da177e4 7092{
e6a8fee2 7093 int ret = 0;
c92ca04b 7094 struct net_device *dev = sp->dev;
e6a8fee2 7095 int err = 0;
1da177e4 7096
eaae7f72 7097 if (sp->config.intr_type == MSI_X)
e6a8fee2
AR
7098 ret = s2io_enable_msi_x(sp);
7099 if (ret) {
7100 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
eaae7f72 7101 sp->config.intr_type = INTA;
20346722 7102 }
1da177e4 7103
d44570e4
JP
7104 /*
7105 * Store the values of the MSIX table in
7106 * the struct s2io_nic structure
7107 */
e6a8fee2 7108 store_xmsi_data(sp);
c92ca04b 7109
e6a8fee2 7110 /* After proper initialization of H/W, register ISR */
eaae7f72 7111 if (sp->config.intr_type == MSI_X) {
ac731ab6
SH
7112 int i, msix_rx_cnt = 0;
7113
f61e0a35
SH
7114 for (i = 0; i < sp->num_entries; i++) {
7115 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
7116 if (sp->s2io_entries[i].type ==
d44570e4 7117 MSIX_RING_TYPE) {
ac731ab6
SH
7118 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7119 dev->name, i);
7120 err = request_irq(sp->entries[i].vector,
d44570e4
JP
7121 s2io_msix_ring_handle,
7122 0,
7123 sp->desc[i],
7124 sp->s2io_entries[i].arg);
ac731ab6 7125 } else if (sp->s2io_entries[i].type ==
d44570e4 7126 MSIX_ALARM_TYPE) {
ac731ab6 7127 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
d44570e4 7128 dev->name, i);
ac731ab6 7129 err = request_irq(sp->entries[i].vector,
d44570e4
JP
7130 s2io_msix_fifo_handle,
7131 0,
7132 sp->desc[i],
7133 sp->s2io_entries[i].arg);
ac731ab6 7134
fb6a825b 7135 }
ac731ab6
SH
7136 /* if either data or addr is zero print it. */
7137 if (!(sp->msix_info[i].addr &&
d44570e4 7138 sp->msix_info[i].data)) {
ac731ab6 7139 DBG_PRINT(ERR_DBG,
d44570e4
JP
7140 "%s @Addr:0x%llx Data:0x%llx\n",
7141 sp->desc[i],
7142 (unsigned long long)
7143 sp->msix_info[i].addr,
7144 (unsigned long long)
7145 ntohl(sp->msix_info[i].data));
ac731ab6 7146 } else
fb6a825b 7147 msix_rx_cnt++;
ac731ab6
SH
7148 if (err) {
7149 remove_msix_isr(sp);
7150
7151 DBG_PRINT(ERR_DBG,
d44570e4
JP
7152 "%s:MSI-X-%d registration "
7153 "failed\n", dev->name, i);
ac731ab6
SH
7154
7155 DBG_PRINT(ERR_DBG,
d44570e4
JP
7156 "%s: Defaulting to INTA\n",
7157 dev->name);
ac731ab6
SH
7158 sp->config.intr_type = INTA;
7159 break;
fb6a825b 7160 }
ac731ab6
SH
7161 sp->s2io_entries[i].in_use =
7162 MSIX_REGISTERED_SUCCESS;
c92ca04b 7163 }
e6a8fee2 7164 }
18b2b7bd 7165 if (!err) {
6cef2b8e 7166 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
9e39f7c5
JP
7167 DBG_PRINT(INFO_DBG,
7168 "MSI-X-TX entries enabled through alarm vector\n");
18b2b7bd 7169 }
e6a8fee2 7170 }
eaae7f72 7171 if (sp->config.intr_type == INTA) {
d44570e4
JP
7172 err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
7173 sp->name, dev);
e6a8fee2
AR
7174 if (err) {
7175 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7176 dev->name);
7177 return -1;
7178 }
7179 }
7180 return 0;
7181}
d44570e4
JP
7182
7183static void s2io_rem_isr(struct s2io_nic *sp)
e6a8fee2 7184{
18b2b7bd
SH
7185 if (sp->config.intr_type == MSI_X)
7186 remove_msix_isr(sp);
7187 else
7188 remove_inta_isr(sp);
e6a8fee2
AR
7189}
7190
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp:    device private structure.
 * @do_io: non-zero to touch the hardware (stop NIC, poll for quiescence,
 *         reset); zero skips all register I/O (e.g. device already gone).
 *
 * Teardown order matters here: timer, link-task exclusion, napi, NIC
 * traffic, ISRs, link state, quiescence/reset, then buffer frees.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	/* Nothing to do if the card was never brought up. */
	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type == MSI_X) {
			/* MSI-X uses one napi context per Rx ring. */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this pointwe are
		 * just settting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		/* Give up after 10 polls (~500 ms) and reset anyway. */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7265
/* Full card teardown, including hardware register I/O. */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7270
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure.
 *
 * Initializes the hardware, fills Rx rings, enables napi, restores the
 * receive mode, starts the NIC, registers ISRs and finally enables
 * interrupts.  Each failure path unwinds whatever was set up before it.
 * Returns 0 on success or a negative errno.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = (struct net_device *)sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the device is unreachable; skip the reset. */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type == MSI_X) {
			/* One napi context per Rx ring under MSI-X. */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* s2io_add_isr() may have left MSI-X vectors registered. */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		/* MSI-X: Rx traffic interrupts come through ring vectors. */
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;
}
7376
20346722 7377/**
1da177e4
LT
7378 * s2io_restart_nic - Resets the NIC.
7379 * @data : long pointer to the device private structure
7380 * Description:
7381 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 7382 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
7383 * the run time of the watch dog routine which is run holding a
7384 * spin lock.
7385 */
7386
c4028958 7387static void s2io_restart_nic(struct work_struct *work)
1da177e4 7388{
1ee6dd77 7389 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 7390 struct net_device *dev = sp->dev;
1da177e4 7391
22747d6b
FR
7392 rtnl_lock();
7393
7394 if (!netif_running(dev))
7395 goto out_unlock;
7396
e6a8fee2 7397 s2io_card_down(sp);
1da177e4 7398 if (s2io_card_up(sp)) {
d44570e4 7399 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
1da177e4 7400 }
3a3d5756 7401 s2io_wake_all_tx_queue(sp);
d44570e4 7402 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
22747d6b
FR
7403out_unlock:
7404 rtnl_unlock();
1da177e4
LT
7405}
7406
20346722
K
7407/**
7408 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
7409 * @dev : Pointer to net device structure
7410 * Description:
7411 * This function is triggered if the Tx Queue is stopped
7412 * for a pre-defined amount of time when the Interface is still up.
7413 * If the Interface is jammed in such a situation, the hardware is
7414 * reset (by s2io_close) and restarted again (by s2io_open) to
7415 * overcome any problem that might have been caused in the hardware.
7416 * Return value:
7417 * void
7418 */
7419
7420static void s2io_tx_watchdog(struct net_device *dev)
7421{
4cf1653a 7422 struct s2io_nic *sp = netdev_priv(dev);
ffb5df6c 7423 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7424
7425 if (netif_carrier_ok(dev)) {
ffb5df6c 7426 swstats->watchdog_timer_cnt++;
1da177e4 7427 schedule_work(&sp->rst_timer_task);
ffb5df6c 7428 swstats->soft_reset_cnt++;
1da177e4
LT
7429 }
7430}
7431
7432/**
7433 * rx_osm_handler - To perform some OS related operations on SKB.
7434 * @sp: private member of the device structure,pointer to s2io_nic structure.
7435 * @skb : the socket buffer pointer.
7436 * @len : length of the packet
7437 * @cksum : FCS checksum of the frame.
7438 * @ring_no : the ring from which this RxD was extracted.
20346722 7439 * Description:
b41477f3 7440 * This function is called by the Rx interrupt serivce routine to perform
1da177e4
LT
7441 * some OS related operations on the SKB before passing it to the upper
7442 * layers. It mainly checks if the checksum is OK, if so adds it to the
7443 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7444 * to the upper layer. If the checksum is wrong, it increments the Rx
7445 * packet error count, frees the SKB and returns error.
7446 * Return value:
7447 * SUCCESS on success and -1 on failure.
7448 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *)ring_data->dev;
	/* The skb pointer was stashed in the descriptor at fill time. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *uninitialized_var(lro);
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* Transfer code lives in the top 16 bits of Control_1. */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* Single-buffer mode: whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* Two-buffer mode: copy the header (buffer 0) in front of
		 * the payload already mapped into the skb (buffer 2). */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* LRO is only attempted for non-fragmented TCP/UDP frames when
	 * hardware Rx checksumming is active. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Classify the frame against existing LRO
				 * sessions; return code selects the action. */
				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	swstats->mem_freed += skb->truesize;
send_up:
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7619
7620/**
7621 * s2io_link - stops/starts the Tx queue.
7622 * @sp : private member of the device structure, which is a pointer to the
7623 * s2io_nic structure.
7624 * @link : inidicates whether link is UP/DOWN.
7625 * Description:
7626 * This function stops/starts the Tx queue depending on whether the link
20346722
K
7627 * status of the NIC is is down or up. This is called by the Alarm
7628 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
7629 * Return value:
7630 * void.
7631 */
7632
d44570e4 7633static void s2io_link(struct s2io_nic *sp, int link)
1da177e4 7634{
d44570e4 7635 struct net_device *dev = (struct net_device *)sp->dev;
ffb5df6c 7636 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
7637
7638 if (link != sp->last_link_state) {
b7c5678f 7639 init_tti(sp, link);
1da177e4
LT
7640 if (link == LINK_DOWN) {
7641 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
3a3d5756 7642 s2io_stop_all_tx_queue(sp);
1da177e4 7643 netif_carrier_off(dev);
ffb5df6c
JP
7644 if (swstats->link_up_cnt)
7645 swstats->link_up_time =
7646 jiffies - sp->start_time;
7647 swstats->link_down_cnt++;
1da177e4
LT
7648 } else {
7649 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
ffb5df6c
JP
7650 if (swstats->link_down_cnt)
7651 swstats->link_down_time =
d44570e4 7652 jiffies - sp->start_time;
ffb5df6c 7653 swstats->link_up_cnt++;
1da177e4 7654 netif_carrier_on(dev);
3a3d5756 7655 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7656 }
7657 }
7658 sp->last_link_state = link;
491976b2 7659 sp->start_time = jiffies;
1da177e4
LT
7660}
7661
20346722
K
7662/**
7663 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7664 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7665 * s2io_nic structure.
7666 * Description:
7667 * This function initializes a few of the PCI and PCI-X configuration registers
7668 * with recommended values.
7669 * Return value:
7670 * void
7671 */
7672
d44570e4 7673static void s2io_init_pci(struct s2io_nic *sp)
1da177e4 7674{
20346722 7675 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7676
7677 /* Enable Data Parity Error Recovery in PCI-X command register. */
7678 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7679 &(pcix_cmd));
1da177e4 7680 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7681 (pcix_cmd | 1));
1da177e4 7682 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7683 &(pcix_cmd));
1da177e4
LT
7684
7685 /* Set the PErr Response bit in PCI command register. */
7686 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7687 pci_write_config_word(sp->pdev, PCI_COMMAND,
7688 (pci_cmd | PCI_COMMAND_PARITY));
7689 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7690}
7691
/*
 * s2io_verify_parm - sanitize module load-time parameters.
 * @pdev:          the PCI device being probed (used to detect Xframe II).
 * @dev_intr_type: in/out requested interrupt type, clamped to INTA/MSI_X.
 * @dev_multiq:    out; set when multiqueue Tx was requested.
 *
 * Clamps every module parameter to its supported range, logging each
 * adjustment.  Always returns SUCCESS.
 */
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
			    u8 *dev_multiq)
{
	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
			  "(%d) not supported\n", tx_fifo_num);

		if (tx_fifo_num < 1)
			tx_fifo_num = 1;
		else
			tx_fifo_num = MAX_TX_FIFOS;

		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
	}

	if (multiq)
		*dev_multiq = multiq;

	/* Steering is meaningless with a single fifo. */
	if (tx_steering_type && (1 == tx_fifo_num)) {
		if (tx_steering_type != TX_DEFAULT_STEERING)
			DBG_PRINT(ERR_DBG,
				  "Tx steering is not supported with "
				  "one fifo. Disabling Tx steering.\n");
		tx_steering_type = NO_STEERING;
	}

	if ((tx_steering_type < NO_STEERING) ||
	    (tx_steering_type > TX_DEFAULT_STEERING)) {
		DBG_PRINT(ERR_DBG,
			  "Requested transmit steering not supported\n");
		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
		tx_steering_type = NO_STEERING;
	}

	if (rx_ring_num > MAX_RX_RINGS) {
		DBG_PRINT(ERR_DBG,
			  "Requested number of rx rings not supported\n");
		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
			  MAX_RX_RINGS);
		rx_ring_num = MAX_RX_RINGS;
	}

	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	/* Only Xframe II (Herc) hardware supports MSI-X. */
	if ((*dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
		rx_ring_mode = 1;
	}
	return SUCCESS;
}
7755
9fc93a41
SS
7756/**
7757 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7758 * or Traffic class respectively.
b7c5678f 7759 * @nic: device private variable
9fc93a41
SS
7760 * Description: The function configures the receive steering to
7761 * desired receive ring.
7762 * Return Value: SUCCESS on success and
7763 * '-1' on failure (endian settings incorrect).
7764 */
7765static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7766{
7767 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7768 register u64 val64 = 0;
7769
7770 if (ds_codepoint > 63)
7771 return FAILURE;
7772
7773 val64 = RTS_DS_MEM_DATA(ring);
7774 writeq(val64, &bar0->rts_ds_mem_data);
7775
7776 val64 = RTS_DS_MEM_CTRL_WE |
7777 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7778 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7779
7780 writeq(val64, &bar0->rts_ds_mem_ctrl);
7781
7782 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
d44570e4
JP
7783 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7784 S2IO_BIT_RESET);
9fc93a41
SS
7785}
7786
04025095
SH
/* net_device callbacks wired into the networking core for this driver. */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open = s2io_open,
	.ndo_stop = s2io_close,
	.ndo_get_stats = s2io_get_stats,
	.ndo_start_xmit = s2io_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = s2io_set_multicast,
	.ndo_do_ioctl = s2io_ioctl,
	.ndo_set_mac_address = s2io_set_mac_addr,
	.ndo_change_mtu = s2io_change_mtu,
	.ndo_vlan_rx_register = s2io_vlan_rx_register,
	.ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
	.ndo_tx_timeout = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = s2io_netpoll,
#endif
};
7804
1da177e4 7805/**
20346722 7806 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
7807 * @pdev : structure containing the PCI related information of the device.
7808 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7809 * Description:
7810 * The function initializes an adapter identified by the pci_dec structure.
20346722
K
7811 * All OS related initialization including memory and device structure and
7812 * initlaization of the device private variable is done. Also the swapper
7813 * control register is initialized to enable read and write into the I/O
1da177e4
LT
7814 * registers of the device.
7815 * Return value:
7816 * returns 0 on success and negative on failure.
7817 */
7818
7819static int __devinit
7820s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7821{
1ee6dd77 7822 struct s2io_nic *sp;
1da177e4 7823 struct net_device *dev;
1da177e4 7824 int i, j, ret;
f957bcf0 7825 int dma_flag = false;
1da177e4
LT
7826 u32 mac_up, mac_down;
7827 u64 val64 = 0, tmp64 = 0;
1ee6dd77 7828 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 7829 u16 subid;
1da177e4 7830 struct config_param *config;
ffb5df6c 7831 struct mac_info *mac_control;
541ae68f 7832 int mode;
cc6e7c44 7833 u8 dev_intr_type = intr_type;
3a3d5756 7834 u8 dev_multiq = 0;
1da177e4 7835
3a3d5756
SH
7836 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7837 if (ret)
9dc737a7 7838 return ret;
1da177e4 7839
d44570e4
JP
7840 ret = pci_enable_device(pdev);
7841 if (ret) {
1da177e4 7842 DBG_PRINT(ERR_DBG,
9e39f7c5 7843 "%s: pci_enable_device failed\n", __func__);
1da177e4
LT
7844 return ret;
7845 }
7846
6a35528a 7847 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
9e39f7c5 7848 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
f957bcf0 7849 dma_flag = true;
d44570e4 7850 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1da177e4 7851 DBG_PRINT(ERR_DBG,
d44570e4
JP
7852 "Unable to obtain 64bit DMA "
7853 "for consistent allocations\n");
1da177e4
LT
7854 pci_disable_device(pdev);
7855 return -ENOMEM;
7856 }
284901a9 7857 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
9e39f7c5 7858 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
1da177e4
LT
7859 } else {
7860 pci_disable_device(pdev);
7861 return -ENOMEM;
7862 }
d44570e4
JP
7863 ret = pci_request_regions(pdev, s2io_driver_name);
7864 if (ret) {
9e39f7c5 7865 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
d44570e4 7866 __func__, ret);
eccb8628
VP
7867 pci_disable_device(pdev);
7868 return -ENODEV;
1da177e4 7869 }
3a3d5756 7870 if (dev_multiq)
6cfc482b 7871 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
3a3d5756 7872 else
b19fa1fa 7873 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4
LT
7874 if (dev == NULL) {
7875 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7876 pci_disable_device(pdev);
7877 pci_release_regions(pdev);
7878 return -ENODEV;
7879 }
7880
7881 pci_set_master(pdev);
7882 pci_set_drvdata(pdev, dev);
1da177e4
LT
7883 SET_NETDEV_DEV(dev, &pdev->dev);
7884
7885 /* Private member variable initialized to s2io NIC structure */
4cf1653a 7886 sp = netdev_priv(dev);
1da177e4
LT
7887 sp->dev = dev;
7888 sp->pdev = pdev;
1da177e4 7889 sp->high_dma_flag = dma_flag;
f957bcf0 7890 sp->device_enabled_once = false;
da6971d8
AR
7891 if (rx_ring_mode == 1)
7892 sp->rxd_mode = RXD_MODE_1;
7893 if (rx_ring_mode == 2)
7894 sp->rxd_mode = RXD_MODE_3B;
da6971d8 7895
eaae7f72 7896 sp->config.intr_type = dev_intr_type;
1da177e4 7897
541ae68f 7898 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
d44570e4 7899 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
541ae68f
K
7900 sp->device_type = XFRAME_II_DEVICE;
7901 else
7902 sp->device_type = XFRAME_I_DEVICE;
7903
6aa20a22 7904
1da177e4
LT
7905 /* Initialize some PCI/PCI-X fields of the NIC. */
7906 s2io_init_pci(sp);
7907
20346722 7908 /*
1da177e4 7909 * Setting the device configuration parameters.
20346722
K
7910 * Most of these parameters can be specified by the user during
7911 * module insertion as they are module loadable parameters. If
7912 * these parameters are not not specified during load time, they
1da177e4
LT
7913 * are initialized with default values.
7914 */
1da177e4 7915 config = &sp->config;
ffb5df6c 7916 mac_control = &sp->mac_control;
1da177e4 7917
596c5c97 7918 config->napi = napi;
6cfc482b 7919 config->tx_steering_type = tx_steering_type;
596c5c97 7920
1da177e4 7921 /* Tx side parameters. */
6cfc482b
SH
7922 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7923 config->tx_fifo_num = MAX_TX_FIFOS;
7924 else
7925 config->tx_fifo_num = tx_fifo_num;
7926
7927 /* Initialize the fifos used for tx steering */
7928 if (config->tx_fifo_num < 5) {
d44570e4
JP
7929 if (config->tx_fifo_num == 1)
7930 sp->total_tcp_fifos = 1;
7931 else
7932 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7933 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7934 sp->total_udp_fifos = 1;
7935 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
6cfc482b
SH
7936 } else {
7937 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
d44570e4 7938 FIFO_OTHER_MAX_NUM);
6cfc482b
SH
7939 sp->udp_fifo_idx = sp->total_tcp_fifos;
7940 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7941 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7942 }
7943
3a3d5756 7944 config->multiq = dev_multiq;
6cfc482b 7945 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7946 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7947
7948 tx_cfg->fifo_len = tx_fifo_len[i];
7949 tx_cfg->fifo_priority = i;
1da177e4
LT
7950 }
7951
20346722
K
7952 /* mapping the QoS priority to the configured fifos */
7953 for (i = 0; i < MAX_TX_FIFOS; i++)
3a3d5756 7954 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
20346722 7955
6cfc482b
SH
7956 /* map the hashing selector table to the configured fifos */
7957 for (i = 0; i < config->tx_fifo_num; i++)
7958 sp->fifo_selector[i] = fifo_selector[i];
7959
7960
1da177e4
LT
7961 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7962 for (i = 0; i < config->tx_fifo_num; i++) {
13d866a9
JP
7963 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7964
7965 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7966 if (tx_cfg->fifo_len < 65) {
1da177e4
LT
7967 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7968 break;
7969 }
7970 }
fed5eccd
AR
7971 /* + 2 because one Txd for skb->data and one Txd for UFO */
7972 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7973
7974 /* Rx side parameters. */
1da177e4 7975 config->rx_ring_num = rx_ring_num;
0425b46a 7976 for (i = 0; i < config->rx_ring_num; i++) {
13d866a9
JP
7977 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7978 struct ring_info *ring = &mac_control->rings[i];
7979
7980 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7981 rx_cfg->ring_priority = i;
7982 ring->rx_bufs_left = 0;
7983 ring->rxd_mode = sp->rxd_mode;
7984 ring->rxd_count = rxd_count[sp->rxd_mode];
7985 ring->pdev = sp->pdev;
7986 ring->dev = sp->dev;
1da177e4
LT
7987 }
7988
7989 for (i = 0; i < rx_ring_num; i++) {
13d866a9
JP
7990 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7991
7992 rx_cfg->ring_org = RING_ORG_BUFF1;
7993 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
1da177e4
LT
7994 }
7995
7996 /* Setting Mac Control parameters */
7997 mac_control->rmac_pause_time = rmac_pause_time;
7998 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7999 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
8000
8001
1da177e4
LT
8002 /* initialize the shared memory used by the NIC and the host */
8003 if (init_shared_mem(sp)) {
d44570e4 8004 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
1da177e4
LT
8005 ret = -ENOMEM;
8006 goto mem_alloc_failed;
8007 }
8008
275f165f 8009 sp->bar0 = pci_ioremap_bar(pdev, 0);
1da177e4 8010 if (!sp->bar0) {
19a60522 8011 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
8012 dev->name);
8013 ret = -ENOMEM;
8014 goto bar0_remap_failed;
8015 }
8016
275f165f 8017 sp->bar1 = pci_ioremap_bar(pdev, 2);
1da177e4 8018 if (!sp->bar1) {
19a60522 8019 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
8020 dev->name);
8021 ret = -ENOMEM;
8022 goto bar1_remap_failed;
8023 }
8024
8025 dev->irq = pdev->irq;
d44570e4 8026 dev->base_addr = (unsigned long)sp->bar0;
1da177e4
LT
8027
8028 /* Initializing the BAR1 address as the start of the FIFO pointer. */
8029 for (j = 0; j < MAX_TX_FIFOS; j++) {
d44570e4
JP
8030 mac_control->tx_FIFO_start[j] =
8031 (struct TxFIFO_element __iomem *)
8032 (sp->bar1 + (j * 0x00020000));
1da177e4
LT
8033 }
8034
8035 /* Driver entry points */
04025095 8036 dev->netdev_ops = &s2io_netdev_ops;
1da177e4 8037 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02 8038 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
f0c54ace 8039 dev->features |= NETIF_F_LRO;
1da177e4 8040 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
f957bcf0 8041 if (sp->high_dma_flag == true)
1da177e4 8042 dev->features |= NETIF_F_HIGHDMA;
1da177e4 8043 dev->features |= NETIF_F_TSO;
f83ef8c0 8044 dev->features |= NETIF_F_TSO6;
db874e65 8045 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
fed5eccd
AR
8046 dev->features |= NETIF_F_UFO;
8047 dev->features |= NETIF_F_HW_CSUM;
8048 }
1da177e4 8049 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
8050 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8051 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 8052
e960fc5c 8053 pci_save_state(sp->pdev);
1da177e4
LT
8054
8055 /* Setting swapper control on the NIC, for proper reset operation */
8056 if (s2io_set_swapper(sp)) {
9e39f7c5 8057 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
1da177e4
LT
8058 dev->name);
8059 ret = -EAGAIN;
8060 goto set_swap_failed;
8061 }
8062
541ae68f
K
8063 /* Verify if the Herc works on the slot its placed into */
8064 if (sp->device_type & XFRAME_II_DEVICE) {
8065 mode = s2io_verify_pci_mode(sp);
8066 if (mode < 0) {
9e39f7c5
JP
8067 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
8068 __func__);
541ae68f
K
8069 ret = -EBADSLT;
8070 goto set_swap_failed;
8071 }
8072 }
8073
f61e0a35
SH
8074 if (sp->config.intr_type == MSI_X) {
8075 sp->num_entries = config->rx_ring_num + 1;
8076 ret = s2io_enable_msi_x(sp);
8077
8078 if (!ret) {
8079 ret = s2io_test_msi(sp);
8080 /* rollback MSI-X, will re-enable during add_isr() */
8081 remove_msix_isr(sp);
8082 }
8083 if (ret) {
8084
8085 DBG_PRINT(ERR_DBG,
9e39f7c5 8086 "MSI-X requested but failed to enable\n");
f61e0a35
SH
8087 sp->config.intr_type = INTA;
8088 }
8089 }
8090
8091 if (config->intr_type == MSI_X) {
13d866a9
JP
8092 for (i = 0; i < config->rx_ring_num ; i++) {
8093 struct ring_info *ring = &mac_control->rings[i];
8094
8095 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
8096 }
f61e0a35
SH
8097 } else {
8098 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8099 }
8100
541ae68f
K
8101 /* Not needed for Herc */
8102 if (sp->device_type & XFRAME_I_DEVICE) {
8103 /*
8104 * Fix for all "FFs" MAC address problems observed on
8105 * Alpha platforms
8106 */
8107 fix_mac_address(sp);
8108 s2io_reset(sp);
8109 }
1da177e4
LT
8110
8111 /*
1da177e4
LT
8112 * MAC address initialization.
8113 * For now only one mac address will be read and used.
8114 */
8115 bar0 = sp->bar0;
8116 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
d44570e4 8117 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
1da177e4 8118 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 8119 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
d44570e4
JP
8120 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8121 S2IO_BIT_RESET);
1da177e4 8122 tmp64 = readq(&bar0->rmac_addr_data0_mem);
d44570e4 8123 mac_down = (u32)tmp64;
1da177e4
LT
8124 mac_up = (u32) (tmp64 >> 32);
8125
1da177e4
LT
8126 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8127 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8128 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8129 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8130 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8131 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8132
1da177e4
LT
8133 /* Set the factory defined MAC address initially */
8134 dev->addr_len = ETH_ALEN;
8135 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
2fd37688 8136 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
1da177e4 8137
faa4f796
SH
8138 /* initialize number of multicast & unicast MAC entries variables */
8139 if (sp->device_type == XFRAME_I_DEVICE) {
8140 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8141 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8142 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8143 } else if (sp->device_type == XFRAME_II_DEVICE) {
8144 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8145 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8146 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8147 }
8148
8149 /* store mac addresses from CAM to s2io_nic structure */
8150 do_s2io_store_unicast_mc(sp);
8151
f61e0a35
SH
8152 /* Configure MSIX vector for number of rings configured plus one */
8153 if ((sp->device_type == XFRAME_II_DEVICE) &&
d44570e4 8154 (config->intr_type == MSI_X))
f61e0a35
SH
8155 sp->num_entries = config->rx_ring_num + 1;
8156
d44570e4 8157 /* Store the values of the MSIX table in the s2io_nic structure */
c77dd43e 8158 store_xmsi_data(sp);
b41477f3
AR
8159 /* reset Nic and bring it to known state */
8160 s2io_reset(sp);
8161
1da177e4 8162 /*
99993af6 8163 * Initialize link state flags
541ae68f 8164 * and the card state parameter
1da177e4 8165 */
92b84437 8166 sp->state = 0;
1da177e4 8167
1da177e4 8168 /* Initialize spinlocks */
13d866a9
JP
8169 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8170 struct fifo_info *fifo = &mac_control->fifos[i];
8171
8172 spin_lock_init(&fifo->tx_lock);
8173 }
db874e65 8174
20346722
K
8175 /*
8176 * SXE-002: Configure link and activity LED to init state
8177 * on driver load.
1da177e4
LT
8178 */
8179 subid = sp->pdev->subsystem_device;
8180 if ((subid & 0xFF) >= 0x07) {
8181 val64 = readq(&bar0->gpio_control);
8182 val64 |= 0x0000800000000000ULL;
8183 writeq(val64, &bar0->gpio_control);
8184 val64 = 0x0411040400000000ULL;
d44570e4 8185 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
8186 val64 = readq(&bar0->gpio_control);
8187 }
8188
8189 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8190
8191 if (register_netdev(dev)) {
8192 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8193 ret = -ENODEV;
8194 goto register_failed;
8195 }
9dc737a7 8196 s2io_vpd_read(sp);
926bd900 8197 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
d44570e4 8198 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
44c10138 8199 sp->product_name, pdev->revision);
b41477f3
AR
8200 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8201 s2io_driver_version);
9e39f7c5
JP
8202 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8203 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
9dc737a7 8204 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 8205 mode = s2io_print_pci_mode(sp);
541ae68f 8206 if (mode < 0) {
541ae68f 8207 ret = -EBADSLT;
9dc737a7 8208 unregister_netdev(dev);
541ae68f
K
8209 goto set_swap_failed;
8210 }
541ae68f 8211 }
d44570e4
JP
8212 switch (sp->rxd_mode) {
8213 case RXD_MODE_1:
8214 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8215 dev->name);
8216 break;
8217 case RXD_MODE_3B:
8218 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8219 dev->name);
8220 break;
9dc737a7 8221 }
db874e65 8222
f61e0a35
SH
8223 switch (sp->config.napi) {
8224 case 0:
8225 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8226 break;
8227 case 1:
db874e65 8228 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
f61e0a35
SH
8229 break;
8230 }
3a3d5756
SH
8231
8232 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
d44570e4 8233 sp->config.tx_fifo_num);
3a3d5756 8234
0425b46a
SH
8235 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8236 sp->config.rx_ring_num);
8237
d44570e4
JP
8238 switch (sp->config.intr_type) {
8239 case INTA:
8240 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8241 break;
8242 case MSI_X:
8243 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8244 break;
9dc737a7 8245 }
3a3d5756 8246 if (sp->config.multiq) {
13d866a9
JP
8247 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8248 struct fifo_info *fifo = &mac_control->fifos[i];
8249
8250 fifo->multiq = config->multiq;
8251 }
3a3d5756 8252 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
d44570e4 8253 dev->name);
3a3d5756
SH
8254 } else
8255 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
d44570e4 8256 dev->name);
3a3d5756 8257
6cfc482b
SH
8258 switch (sp->config.tx_steering_type) {
8259 case NO_STEERING:
d44570e4
JP
8260 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8261 dev->name);
8262 break;
6cfc482b 8263 case TX_PRIORITY_STEERING:
d44570e4
JP
8264 DBG_PRINT(ERR_DBG,
8265 "%s: Priority steering enabled for transmit\n",
8266 dev->name);
6cfc482b
SH
8267 break;
8268 case TX_DEFAULT_STEERING:
d44570e4
JP
8269 DBG_PRINT(ERR_DBG,
8270 "%s: Default steering enabled for transmit\n",
8271 dev->name);
6cfc482b
SH
8272 }
8273
f0c54ace
AW
8274 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8275 dev->name);
db874e65 8276 if (ufo)
d44570e4
JP
8277 DBG_PRINT(ERR_DBG,
8278 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8279 dev->name);
7ba013ac 8280 /* Initialize device name */
9dc737a7 8281 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7ba013ac 8282
cd0fce03
BL
8283 if (vlan_tag_strip)
8284 sp->vlan_strip_flag = 1;
8285 else
8286 sp->vlan_strip_flag = 0;
8287
20346722
K
8288 /*
8289 * Make Link state as off at this point, when the Link change
8290 * interrupt comes the state will be automatically changed to
1da177e4
LT
8291 * the right state.
8292 */
8293 netif_carrier_off(dev);
1da177e4
LT
8294
8295 return 0;
8296
d44570e4
JP
8297register_failed:
8298set_swap_failed:
1da177e4 8299 iounmap(sp->bar1);
d44570e4 8300bar1_remap_failed:
1da177e4 8301 iounmap(sp->bar0);
d44570e4
JP
8302bar0_remap_failed:
8303mem_alloc_failed:
1da177e4
LT
8304 free_shared_mem(sp);
8305 pci_disable_device(pdev);
eccb8628 8306 pci_release_regions(pdev);
1da177e4
LT
8307 pci_set_drvdata(pdev, NULL);
8308 free_netdev(dev);
8309
8310 return ret;
8311}
8312
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the Pci subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	/* drvdata is set at the end of probe; NULL means probe never
	 * completed for this device. */
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/* Wait for rst_timer_task / set_link_task queued by the ISR paths
	 * to finish before tearing the device down. */
	flush_scheduled_work();

	sp = netdev_priv(dev);
	/* Stop the stack from using the device before freeing resources. */
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	/* free_netdev() also frees sp (allocated via alloc_etherdev). */
	free_netdev(dev);
	pci_disable_device(pdev);
}
8345
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

static int __init s2io_starter(void)
{
	/* Device probing itself happens via the driver's ->probe
	 * callback once the PCI core matches a supported device. */
	return pci_register_driver(&s2io_driver);
}
8356
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static __exit void s2io_closer(void)
{
	/* Unregistering triggers ->remove (s2io_rem_nic) for every
	 * bound device. */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);
7d3d0439 8370
/*
 * check_L2_lro_capable - validate the L2 framing of a received packet for LRO
 * @buffer: start of the received frame (L2 header).
 * @ip: out parameter; set to the start of the IP header on success.
 * @tcp: out parameter; set to the start of the TCP header on success.
 * @rxdp: receive descriptor for this frame (carries HW-parsed frame info).
 * @sp: device private structure (read for vlan_strip_flag).
 *
 * Returns 0 and fills @ip/@tcp if the frame is a TCP frame with DIX
 * (Ethernet II) framing, optionally VLAN tagged; returns -1 for non-TCP
 * frames or LLC/SNAP framing, which are not merged.
 */
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
				struct tcphdr **tcp, struct RxD_t *rxdp,
				struct s2io_nic *sp)
{
	int ip_off;
	/* Bits 37..39 of Control_1 hold the HW-classified L2 frame type. */
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG,
			  "%s: Non-TCP frames not supported for LRO\n",
			  __func__);
		return -1;
	}

	/* Checking for DIX type (0) or DIX type with VLAN (4) */
	if ((l2_type == 0) || (l2_type == 4)) {
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		/*
		 * If vlan stripping is disabled and the frame is VLAN tagged,
		 * shift the offset by the VLAN header size bytes.
		 */
		if ((!sp->vlan_strip_flag) &&
		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
			ip_off += HEADER_VLAN_SIZE;
	} else {
		/* LLC, SNAP etc are considered non-mergeable */
		return -1;
	}

	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	/* ihl is the IP header length in 32-bit words; convert to bytes. */
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}
8407
1ee6dd77 8408static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8409 struct tcphdr *tcp)
8410{
d44570e4
JP
8411 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8412 if ((lro->iph->saddr != ip->saddr) ||
8413 (lro->iph->daddr != ip->daddr) ||
8414 (lro->tcph->source != tcp->source) ||
8415 (lro->tcph->dest != tcp->dest))
7d3d0439
RA
8416 return -1;
8417 return 0;
8418}
8419
8420static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8421{
d44570e4 8422 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7d3d0439
RA
8423}
8424
/*
 * initiate_new_session - start a new LRO session from the first packet
 * @lro: free LRO session slot to populate.
 * @l2h: pointer to the packet's L2 header (kept for later header rewrite).
 * @ip: IP header of the packet.
 * @tcp: TCP header of the packet.
 * @tcp_pyld_len: TCP payload length of this packet.
 * @vlan_tag: VLAN tag extracted from the receive descriptor.
 *
 * Caches the header pointers and seeds the sequence/length bookkeeping
 * used when aggregating subsequent in-order segments.
 */
static void initiate_new_session(struct lro *lro, u8 *l2h,
				 struct iphdr *ip, struct tcphdr *tcp,
				 u32 tcp_pyld_len, u16 vlan_tag)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	/* Next expected sequence number for in-order aggregation. */
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = tcp->ack_seq;
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	lro->vlan_tag = vlan_tag;
	/*
	 * Check if we saw TCP timestamp.
	 * Other consistency checks have already been done.
	 */
	if (tcp->doff == 8) {
		__be32 *ptr;
		/* Options start right after the fixed TCP header;
		 * layout (2x NOP + timestamp) was verified earlier. */
		ptr = (__be32 *)(tcp+1);
		lro->saw_ts = 1;
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr+2);	/* kept in network order */
	}
	lro->in_use = 1;
}
8452
/*
 * update_L3L4_header - rewrite IP/TCP headers of an aggregated super-packet
 * @sp: device private structure (for statistics).
 * @lro: LRO session being flushed.
 *
 * Called when a session is flushed: patches the first packet's IP total
 * length and checksum, and its TCP ack/window (and tsecr if timestamps
 * are in use) to reflect all aggregated segments.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	/* Checksum must be zeroed before recomputation. */
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header with the latest ack/window seen. */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	swstats->sum_avg_pkts_aggregated += lro->sg_num;
	swstats->num_aggregations++;
}
8484
/*
 * aggregate_new_rx - fold an in-order TCP segment into an LRO session
 * @lro: session the packet belongs to.
 * @ip: IP header of the new packet.
 * @tcp: TCP header of the new packet.
 * @l4_pyld: TCP payload length of the new packet.
 *
 * Advances the session's length/sequence bookkeeping and records the
 * packet's ack/window (and timestamps) so the headers can be rewritten
 * when the session is flushed.
 */
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
			     struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window ad(from this pkt) in LRO object */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		__be32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (__be32 *)(tcp+1);
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr + 2);	/* kept in network order */
	}
}
8506
/*
 * verify_l3_l4_lro_capable - check whether a TCP segment may be aggregated
 * @l_lro: existing session, or NULL when probing for a new session.
 * @ip: IP header of the packet.
 * @tcp: TCP header of the packet.
 * @tcp_pyld_len: TCP payload length of the packet.
 *
 * Returns 0 if the segment is plain enough to merge (data-bearing, no IP
 * options, no ECN CE mark, only ACK set, at most one well-formed TCP
 * timestamp option with monotonically increasing tsval); -1 otherwise.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 * doff == 5: no options; doff == 8: room for NOP,NOP,timestamp.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8564
/*
 * s2io_club_tcp_session - LRO dispatch: match, aggregate or flush a packet
 * @ring_data: receive ring owning the per-ring LRO session table.
 * @buffer: start of the received frame (L2 header).
 * @tcp: out parameter; set to the packet's TCP header.
 * @tcp_len: out parameter; set to the packet's TCP payload length.
 * @lro: out parameter; the session the packet was matched/assigned to,
 *       or NULL when all sessions are in use.
 * @rxdp: receive descriptor of the frame.
 * @sp: device private structure.
 *
 * Return codes (interpreted by the rx path):
 *   0 - all sessions in use, send packet up as-is (*lro == NULL)
 *   1 - packet aggregated into an existing session
 *   2 - flush: existing session's headers updated, send both up
 *   3 - new session begun with this packet
 *   4 - aggregated and session reached max size; flush it
 *   5 - packet not L3/L4 aggregatable, send it up unmodified
 *  <0 - not LRO-capable at L2 (non-TCP or non-DIX framing)
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate the IP/TCP headers; bail out for non-mergeable framing. */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);

	/* First pass: look for an in-use session with a matching 4-tuple. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Out-of-order segment: flush the session. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		/* Session grew to the configured limit: flush it now. */
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8662
1ee6dd77 8663static void clear_lro_session(struct lro *lro)
7d3d0439 8664{
1ee6dd77 8665 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8666
8667 memset(lro, 0, lro_struct_size);
8668}
8669
/*
 * queue_rx_frame - hand a received skb to the network stack
 * @skb: fully assembled receive buffer (skb->dev already set).
 * @vlan_tag: VLAN tag from the receive descriptor, 0 if untagged.
 *
 * Chooses between the VLAN-accelerated and plain receive paths, and
 * between the NAPI and non-NAPI entry points based on driver config.
 */
static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
{
	struct net_device *dev = skb->dev;
	struct s2io_nic *sp = netdev_priv(dev);

	skb->protocol = eth_type_trans(skb, dev);
	if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
		/* Queueing the vlan frame to the upper layer */
		if (sp->config.napi)
			vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
		else
			vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
	} else {
		if (sp->config.napi)
			netif_receive_skb(skb);
		else
			netif_rx(skb);
	}
}
8689
/*
 * lro_append_pkt - chain an aggregated segment onto the session's parent skb
 * @sp: device private structure (for statistics).
 * @lro: session the segment belongs to; lro->parent is the head skb.
 * @skb: skb carrying the new segment.
 * @tcp_len: TCP payload length of the new segment.
 *
 * Trims @skb down to its TCP payload and links it into the parent skb's
 * frag_list, updating the parent's length accounting.
 */
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	/* Drop the headers of this segment; only the payload is chained. */
	skb_pull(skb, (skb->len - tcp_len));
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;	/* append to existing chain */
	else
		skb_shinfo(first)->frag_list = skb;	/* first fragment */
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}
d796fdb7
LV
8707
/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* Permanent failure: no point attempting a reset. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	/* Ask the PCI core to reset the slot, then call our
	 * ->slot_reset callback. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8735
/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has exprienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	/* Put the adapter back into a known state; traffic is
	 * restarted later in ->resume. */
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8760
/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Re-program the MAC address lost across the reset. */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}