]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/s2io.c
via-velocity: add velocity_set_rxbufsize helper
[net-next-2.6.git] / drivers / net / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
0c61ed5f 3 * Copyright(c) 2002-2007 Neterion Inc.
1da177e4
LT
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
 18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4
LT
27 * The module loadable parameters that are supported by the driver and a brief
 28 * explanation of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
43b7c451 41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
9dc737a7
AR
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
926930b2
SS
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
3a3d5756
SH
53 * multiq: This parameter used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
55 ************************************************************************/
56
1da177e4
LT
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
1e7f0bd8 62#include <linux/dma-mapping.h>
1da177e4
LT
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
66#include <linux/skbuff.h>
67#include <linux/init.h>
68#include <linux/delay.h>
69#include <linux/stddef.h>
70#include <linux/ioctl.h>
71#include <linux/timex.h>
1da177e4 72#include <linux/ethtool.h>
1da177e4 73#include <linux/workqueue.h>
be3a6b02 74#include <linux/if_vlan.h>
7d3d0439
RA
75#include <linux/ip.h>
76#include <linux/tcp.h>
77#include <net/tcp.h>
1da177e4 78
1da177e4
LT
79#include <asm/system.h>
80#include <asm/uaccess.h>
20346722 81#include <asm/io.h>
fe931395 82#include <asm/div64.h>
330ce0de 83#include <asm/irq.h>
1da177e4
LT
84
85/* local include */
86#include "s2io.h"
87#include "s2io-regs.h"
88
#define DRV_VERSION "2.0.26.24"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-RxD-mode geometry, indexed by (rxd_mode - 1): entry 0 is 1-buffer
 * mode (RxD1), entry 1 is the 2/3-buffer mode (RxD3). rxd_size[] is the
 * descriptor size in bytes (used to step through a block), rxd_count[]
 * is the number of usable descriptors per block. */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
1ee6dd77 98static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
99{
100 int ret;
101
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105 return ret;
106}
107
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 *
 * Arguments and the full expansions are parenthesized so that the
 * macros expand safely when given expression arguments or when used
 * inside larger expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(((dev_type) == XFRAME_I_DEVICE) ?				\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||		\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up when neither a remote nor a local RMAC fault is flagged. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 120
92b84437
SS
/* Returns non-zero when the __S2IO_STATE_CARD_UP bit is set in sp->state,
 * i.e. the adapter has been brought up and not yet torn down. */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
125
1da177e4
LT
/* Ethtool related variables and Macros. */
/* Names of the self-test categories reported through ethtool; the
 * "(offline)"/"(online)" suffixes indicate when each test may run. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
134
/* Hardware statistics names common to Xframe I and Xframe II, exported
 * via ethtool -S. NOTE(review): the order here is assumed to match the
 * order in which counters are copied out of the stats block — keep in
 * sync when adding entries. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
231
/* Additional hardware statistics available only on Xframe II adapters;
 * appended after the common Xena keys for that device type. */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
250
/* Software (driver-maintained) statistics names; reported after the
 * hardware keys in the ethtool -S output. The first entry is a section
 * banner, not a counter. */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
325
/* Counts of the key tables above, and the corresponding byte lengths of
 * the flattened string blocks handed to ethtool. Expansions are fully
 * parenthesized so the macros compose safely in larger expressions. */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
338
/* Initialize @timer with handler @handle and argument @arg, then arm it
 * to fire @exp jiffies from now. Wrapped in do { } while (0) so the
 * macro behaves as a single statement (safe inside unbraced if/else). */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long) arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
2fd37688
SS
345/* copy mac addr to def_mac_addr array */
346static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347{
348 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354}
be3a6b02
K
/* Add the vlan */
/*
 * Attach a VLAN group to the device. Every Tx fifo lock is acquired
 * (in ascending fifo order) before vlgrp is updated, and released in
 * reverse order afterwards, so no transmit path can observe the group
 * pointer mid-update.
 */
static void s2io_vlan_rx_register(struct net_device *dev,
					struct vlan_group *grp)
{
	int i;
	struct s2io_nic *nic = dev->priv;
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	/* Quiesce all transmit paths: lock fifos in ascending order. */
	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	nic->vlgrp = grp;

	/* Release in the reverse order of acquisition. */
	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
				flags[i]);
}
373
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not.
 * NOTE(review): appears to mirror the hardware register state for the
 * vlan_tag_strip module parameter — confirm against the code that
 * toggles promiscuous mode. */
static int vlan_strip_flag;
cdb5bf02
SH
/* Unregister the vlan */
/*
 * Remove VLAN id @vid from the attached group (if any). Uses the same
 * all-fifos locking discipline as s2io_vlan_rx_register(): acquire every
 * Tx fifo lock in ascending order, release in reverse order.
 */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	int i;
	struct s2io_nic *nic = dev->priv;
	unsigned long flags[MAX_TX_FIFOS];
	struct mac_info *mac_control = &nic->mac_control;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++)
		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);

	/* Only detach the device if a group was actually registered. */
	if (nic->vlgrp)
		vlan_group_set_device(nic->vlgrp, vid, NULL);

	for (i = config->tx_fifo_num - 1; i >= 0; i--)
		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
			flags[i]);
}
396
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator value for the DTX configuration sequences below. */
#define END_SIGN 0x0
/* Herc (Xframe II) DTX configuration: alternating "set address" /
 * "write data" command pairs, terminated by END_SIGN. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
423
/* Xena (Xframe I) DTX configuration: same set-address / write-data
 * pairing as herc_act_dtx_cfg above, terminated by END_SIGN. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
439
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
461
b41477f3
AR
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-fifo / per-ring array parameters; unspecified entries fall back
 * to the defaults given in the designated initializers below. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 509
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
527
/* PCI error-recovery callbacks invoked by the PCI core on bus errors. */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

/* PCI driver glue: probe/remove entry points and error handlers. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
541
/* A simplifier macro used both by init and free shared_mem Fns().
 * Ceiling division: number of pages needed to hold @len entries when
 * @per_each entries fit per page. Arguments are parenthesized so that
 * expression arguments expand correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
544
3a3d5756
SH
545/* netqueue manipulation helper functions */
546static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
547{
548 int i;
3a3d5756
SH
549 if (sp->config.multiq) {
550 for (i = 0; i < sp->config.tx_fifo_num; i++)
551 netif_stop_subqueue(sp->dev, i);
b19fa1fa 552 } else {
3a3d5756
SH
553 for (i = 0; i < sp->config.tx_fifo_num; i++)
554 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
555 netif_stop_queue(sp->dev);
556 }
557}
558
559static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
560{
3a3d5756
SH
561 if (sp->config.multiq)
562 netif_stop_subqueue(sp->dev, fifo_no);
b19fa1fa 563 else {
3a3d5756
SH
564 sp->mac_control.fifos[fifo_no].queue_state =
565 FIFO_QUEUE_STOP;
566 netif_stop_queue(sp->dev);
567 }
568}
569
570static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
571{
572 int i;
3a3d5756
SH
573 if (sp->config.multiq) {
574 for (i = 0; i < sp->config.tx_fifo_num; i++)
575 netif_start_subqueue(sp->dev, i);
b19fa1fa 576 } else {
3a3d5756
SH
577 for (i = 0; i < sp->config.tx_fifo_num; i++)
578 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
579 netif_start_queue(sp->dev);
580 }
581}
582
583static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
584{
3a3d5756
SH
585 if (sp->config.multiq)
586 netif_start_subqueue(sp->dev, fifo_no);
b19fa1fa 587 else {
3a3d5756
SH
588 sp->mac_control.fifos[fifo_no].queue_state =
589 FIFO_QUEUE_START;
590 netif_start_queue(sp->dev);
591 }
592}
593
594static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
595{
596 int i;
3a3d5756
SH
597 if (sp->config.multiq) {
598 for (i = 0; i < sp->config.tx_fifo_num; i++)
599 netif_wake_subqueue(sp->dev, i);
b19fa1fa 600 } else {
3a3d5756
SH
601 for (i = 0; i < sp->config.tx_fifo_num; i++)
602 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603 netif_wake_queue(sp->dev);
604 }
605}
606
607static inline void s2io_wake_tx_queue(
608 struct fifo_info *fifo, int cnt, u8 multiq)
609{
610
3a3d5756
SH
611 if (multiq) {
612 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
613 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 614 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
615 if (netif_queue_stopped(fifo->dev)) {
616 fifo->queue_state = FIFO_QUEUE_START;
617 netif_wake_queue(fifo->dev);
618 }
619 }
620}
621
1da177e4
LT
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS, or a negative errno / FAILURE on bad configuration.
 * On any allocation failure the caller is expected to invoke
 * free_shared_mem(), which releases everything allocated so far.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;
	/* Running total of bytes allocated, folded into sw_stat at the end. */
	unsigned long long mem_allocated = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIOFs */
	/* Reject configurations whose combined fifo lengths exceed the
	 * hardware TxD limit. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
			DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
			DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
				"are 2 to 8192\n");
			return -EINVAL;
		}
	}

	/* Each TxD list occupies lst_size bytes; lst_per_page lists fit
	 * in one DMA-coherent page. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-fifo bookkeeping array, one list_info_hold per TxD list. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
		mac_control->fifos[i].dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
				"%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
				"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into per-list virtual/physical
			 * address pairs, stopping at the fifo length. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-fifo scratch array used by the UFO path (one u64 per TxD). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		mac_control->fifos[i].ufo_in_band_v
			= kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!mac_control->fifos[i].ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		/* num_rxd must be a whole number of blocks (rxd_count
		 * usable descriptors + 1 link descriptor per block). */
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;
		mac_control->rings[i].lro = lro_enable;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; //size is always page size
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated +=
			(sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
			/* Record the per-descriptor addresses inside the
			 * freshly allocated block. */
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Each block's trailing link descriptor points at the next
		 * block (wrapping modulo blk_cnt), forming a ring. */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			   (rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
				     GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				mem_allocated += (sizeof(struct buffAdd) * \
					(rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					/* Over-allocate by ALIGN_SIZE and
					 * round up so ba_0/ba_1 are aligned;
					 * the *_org pointers are kept for
					 * kfree(). */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated
						+= (BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
935
20346722
K
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*
	 * Tx side: each fifo's descriptor lists live in PAGE_SIZE coherent
	 * DMA pages holding lst_per_page TxD lists each (mirrors the layout
	 * built by init_shared_mem()).
	 */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
				lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			/* NOTE(review): returning here skips the Rx-block,
			 * ufo and stats-mem teardown below; presumably a
			 * NULL list_info means nothing past this point was
			 * allocated — confirm against init_shared_mem()'s
			 * failure paths.
			 */
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
				 list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				"%s: Freeing TxDL with zero DMA addr. ",
				dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
		(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
	}

	/* Rx side: free every ring's coherent block areas and the rxds
	 * bookkeeping arrays that shadow them.
	 */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			/* A NULL virt addr marks the first never-allocated
			 * block; nothing beyond it exists. */
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					/* ba_0_org/ba_1_org are the
					 * unaligned kmalloc originals; the
					 * aligned ba_0/ba_1 point inside
					 * them and are not freed separately.
					 */
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
			}
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	/* Per-fifo UFO scratch arrays (one u64 per descriptor list). */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		if (mac_control->fifos[i].ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
			kfree(mac_control->fifos[i].ufo_in_band_v);
		}
	}

	/* Statistics block goes last: sw_stat lives inside it, so the
	 * accounting update must precede the free. */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1072
541ae68f
K
1073/**
1074 * s2io_verify_pci_mode -
1075 */
1076
1ee6dd77 1077static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1078{
1ee6dd77 1079 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1080 register u64 val64 = 0;
1081 int mode;
1082
1083 val64 = readq(&bar0->pci_mode);
1084 mode = (u8)GET_PCI_MODE(val64);
1085
1086 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1087 return -1; /* Unknown PCI mode */
1088 return mode;
1089}
1090
c92ca04b
AR
1091#define NEC_VENID 0x1033
1092#define NEC_DEVID 0x0125
1093static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1094{
1095 struct pci_dev *tdev = NULL;
26d36b64
AC
1096 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1097 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1098 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1099 pci_dev_put(tdev);
c92ca04b 1100 return 1;
7ad62dbc 1101 }
c92ca04b
AR
1102 }
1103 }
1104 return 0;
1105}
541ae68f 1106
7b32a312 1107static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f
K
1108/**
1109 * s2io_print_pci_mode -
1110 */
1ee6dd77 1111static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1112{
1ee6dd77 1113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1114 register u64 val64 = 0;
1115 int mode;
1116 struct config_param *config = &nic->config;
1117
1118 val64 = readq(&bar0->pci_mode);
1119 mode = (u8)GET_PCI_MODE(val64);
1120
1121 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1122 return -1; /* Unknown PCI mode */
1123
c92ca04b
AR
1124 config->bus_speed = bus_speed[mode];
1125
1126 if (s2io_on_nec_bridge(nic->pdev)) {
1127 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1128 nic->dev->name);
1129 return mode;
1130 }
1131
541ae68f
K
1132 if (val64 & PCI_MODE_32_BITS) {
1133 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1134 } else {
1135 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1136 }
1137
1138 switch(mode) {
1139 case PCI_MODE_PCI_33:
1140 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
541ae68f
K
1141 break;
1142 case PCI_MODE_PCI_66:
1143 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
541ae68f
K
1144 break;
1145 case PCI_MODE_PCIX_M1_66:
1146 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
541ae68f
K
1147 break;
1148 case PCI_MODE_PCIX_M1_100:
1149 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
541ae68f
K
1150 break;
1151 case PCI_MODE_PCIX_M1_133:
1152 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
541ae68f
K
1153 break;
1154 case PCI_MODE_PCIX_M2_66:
1155 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
541ae68f
K
1156 break;
1157 case PCI_MODE_PCIX_M2_100:
1158 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
541ae68f
K
1159 break;
1160 case PCI_MODE_PCIX_M2_133:
1161 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
541ae68f
K
1162 break;
1163 default:
1164 return -1; /* Unsupported bus speed */
1165 }
1166
1167 return mode;
1168}
1169
b7c5678f
RV
/**
 * init_tti - Initialization transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures transmit traffic interrupts
 * Return Value: SUCCESS on success and
 * '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	/* Program one TTI memory entry per configured Tx fifo. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Timer value scales with the detected bus speed. */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous Tx interrupts only on fifo 0, and only while
		 * the link is up. */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* With default Tx steering, the fifos in the UDP
			 * range get higher utilization-count thresholds
			 * than the rest. */
			if ((nic->config.tx_steering_type ==
				TX_DEFAULT_STEERING) &&
				(config->tx_fifo_num > 1) &&
				(i >= nic->udp_fifo_idx) &&
				(i < (nic->udp_fifo_idx +
				nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Issue the write-entry command for this fifo's offset and
		 * wait for the hardware to clear the strobe bit. */
		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
				TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1246
20346722
K
1247/**
1248 * init_nic - Initialization of hardware
b7c5678f 1249 * @nic: device private variable
20346722
K
1250 * Description: The function sequentially configures every block
1251 * of the H/W from their reset values.
1252 * Return Value: SUCCESS on success and
1da177e4
LT
1253 * '-1' on failure (endian settings incorrect).
1254 */
1255
1256static int init_nic(struct s2io_nic *nic)
1257{
1ee6dd77 1258 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1259 struct net_device *dev = nic->dev;
1260 register u64 val64 = 0;
1261 void __iomem *add;
1262 u32 time;
1263 int i, j;
1ee6dd77 1264 struct mac_info *mac_control;
1da177e4 1265 struct config_param *config;
c92ca04b 1266 int dtx_cnt = 0;
1da177e4 1267 unsigned long long mem_share;
20346722 1268 int mem_size;
1da177e4
LT
1269
1270 mac_control = &nic->mac_control;
1271 config = &nic->config;
1272
5e25b9dd 1273 /* to set the swapper controle on the card */
20346722 1274 if(s2io_set_swapper(nic)) {
1da177e4 1275 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
9f74ffde 1276 return -EIO;
1da177e4
LT
1277 }
1278
541ae68f
K
1279 /*
1280 * Herc requires EOI to be removed from reset before XGXS, so..
1281 */
1282 if (nic->device_type & XFRAME_II_DEVICE) {
1283 val64 = 0xA500000000ULL;
1284 writeq(val64, &bar0->sw_reset);
1285 msleep(500);
1286 val64 = readq(&bar0->sw_reset);
1287 }
1288
1da177e4
LT
1289 /* Remove XGXS from reset state */
1290 val64 = 0;
1291 writeq(val64, &bar0->sw_reset);
1da177e4 1292 msleep(500);
20346722 1293 val64 = readq(&bar0->sw_reset);
1da177e4 1294
7962024e
SH
1295 /* Ensure that it's safe to access registers by checking
1296 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1297 */
1298 if (nic->device_type == XFRAME_II_DEVICE) {
1299 for (i = 0; i < 50; i++) {
1300 val64 = readq(&bar0->adapter_status);
1301 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1302 break;
1303 msleep(10);
1304 }
1305 if (i == 50)
1306 return -ENODEV;
1307 }
1308
1da177e4
LT
1309 /* Enable Receiving broadcasts */
1310 add = &bar0->mac_cfg;
1311 val64 = readq(&bar0->mac_cfg);
1312 val64 |= MAC_RMAC_BCAST_ENABLE;
1313 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1314 writel((u32) val64, add);
1315 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1316 writel((u32) (val64 >> 32), (add + 4));
1317
1318 /* Read registers in all blocks */
1319 val64 = readq(&bar0->mac_int_mask);
1320 val64 = readq(&bar0->mc_int_mask);
1321 val64 = readq(&bar0->xgxs_int_mask);
1322
1323 /* Set MTU */
1324 val64 = dev->mtu;
1325 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1326
541ae68f
K
1327 if (nic->device_type & XFRAME_II_DEVICE) {
1328 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1329 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1330 &bar0->dtx_control, UF);
541ae68f
K
1331 if (dtx_cnt & 0x1)
1332 msleep(1); /* Necessary!! */
1da177e4
LT
1333 dtx_cnt++;
1334 }
541ae68f 1335 } else {
c92ca04b
AR
1336 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1337 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1338 &bar0->dtx_control, UF);
1339 val64 = readq(&bar0->dtx_control);
1340 dtx_cnt++;
1da177e4
LT
1341 }
1342 }
1343
1344 /* Tx DMA Initialization */
1345 val64 = 0;
1346 writeq(val64, &bar0->tx_fifo_partition_0);
1347 writeq(val64, &bar0->tx_fifo_partition_1);
1348 writeq(val64, &bar0->tx_fifo_partition_2);
1349 writeq(val64, &bar0->tx_fifo_partition_3);
1350
1351
1352 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1353 val64 |=
b7c5678f 1354 vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1da177e4 1355 13) | vBIT(config->tx_cfg[i].fifo_priority,
b7c5678f 1356 ((j * 32) + 5), 3);
1da177e4
LT
1357
1358 if (i == (config->tx_fifo_num - 1)) {
1359 if (i % 2 == 0)
1360 i++;
1361 }
1362
1363 switch (i) {
1364 case 1:
1365 writeq(val64, &bar0->tx_fifo_partition_0);
1366 val64 = 0;
b7c5678f 1367 j = 0;
1da177e4
LT
1368 break;
1369 case 3:
1370 writeq(val64, &bar0->tx_fifo_partition_1);
1371 val64 = 0;
b7c5678f 1372 j = 0;
1da177e4
LT
1373 break;
1374 case 5:
1375 writeq(val64, &bar0->tx_fifo_partition_2);
1376 val64 = 0;
b7c5678f 1377 j = 0;
1da177e4
LT
1378 break;
1379 case 7:
1380 writeq(val64, &bar0->tx_fifo_partition_3);
b7c5678f
RV
1381 val64 = 0;
1382 j = 0;
1383 break;
1384 default:
1385 j++;
1da177e4
LT
1386 break;
1387 }
1388 }
1389
5e25b9dd
K
1390 /*
1391 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1392 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1393 */
541ae68f 1394 if ((nic->device_type == XFRAME_I_DEVICE) &&
44c10138 1395 (nic->pdev->revision < 4))
5e25b9dd
K
1396 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1397
1da177e4
LT
1398 val64 = readq(&bar0->tx_fifo_partition_0);
1399 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1400 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1401
20346722
K
1402 /*
1403 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1404 * integrity checking.
1405 */
1406 val64 = readq(&bar0->tx_pa_cfg);
1407 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1408 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1409 writeq(val64, &bar0->tx_pa_cfg);
1410
1411 /* Rx DMA intialization. */
1412 val64 = 0;
1413 for (i = 0; i < config->rx_ring_num; i++) {
1414 val64 |=
1415 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1416 3);
1417 }
1418 writeq(val64, &bar0->rx_queue_priority);
1419
20346722
K
1420 /*
1421 * Allocating equal share of memory to all the
1da177e4
LT
1422 * configured Rings.
1423 */
1424 val64 = 0;
541ae68f
K
1425 if (nic->device_type & XFRAME_II_DEVICE)
1426 mem_size = 32;
1427 else
1428 mem_size = 64;
1429
1da177e4
LT
1430 for (i = 0; i < config->rx_ring_num; i++) {
1431 switch (i) {
1432 case 0:
20346722
K
1433 mem_share = (mem_size / config->rx_ring_num +
1434 mem_size % config->rx_ring_num);
1da177e4
LT
1435 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1436 continue;
1437 case 1:
20346722 1438 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1439 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1440 continue;
1441 case 2:
20346722 1442 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1443 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1444 continue;
1445 case 3:
20346722 1446 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1447 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1448 continue;
1449 case 4:
20346722 1450 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1451 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1452 continue;
1453 case 5:
20346722 1454 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1455 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1456 continue;
1457 case 6:
20346722 1458 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1459 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1460 continue;
1461 case 7:
20346722 1462 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1463 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1464 continue;
1465 }
1466 }
1467 writeq(val64, &bar0->rx_queue_cfg);
1468
20346722 1469 /*
5e25b9dd 1470 * Filling Tx round robin registers
b7c5678f 1471 * as per the number of FIFOs for equal scheduling priority
1da177e4 1472 */
5e25b9dd
K
1473 switch (config->tx_fifo_num) {
1474 case 1:
b7c5678f 1475 val64 = 0x0;
5e25b9dd
K
1476 writeq(val64, &bar0->tx_w_round_robin_0);
1477 writeq(val64, &bar0->tx_w_round_robin_1);
1478 writeq(val64, &bar0->tx_w_round_robin_2);
1479 writeq(val64, &bar0->tx_w_round_robin_3);
1480 writeq(val64, &bar0->tx_w_round_robin_4);
1481 break;
1482 case 2:
b7c5678f 1483 val64 = 0x0001000100010001ULL;
5e25b9dd 1484 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1485 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1486 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1487 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1488 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1489 writeq(val64, &bar0->tx_w_round_robin_4);
1490 break;
1491 case 3:
b7c5678f 1492 val64 = 0x0001020001020001ULL;
5e25b9dd 1493 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1494 val64 = 0x0200010200010200ULL;
5e25b9dd 1495 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1496 val64 = 0x0102000102000102ULL;
5e25b9dd 1497 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1498 val64 = 0x0001020001020001ULL;
5e25b9dd 1499 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1500 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1501 writeq(val64, &bar0->tx_w_round_robin_4);
1502 break;
1503 case 4:
b7c5678f 1504 val64 = 0x0001020300010203ULL;
5e25b9dd 1505 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1506 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1507 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1508 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1509 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1510 writeq(val64, &bar0->tx_w_round_robin_4);
1511 break;
1512 case 5:
b7c5678f 1513 val64 = 0x0001020304000102ULL;
5e25b9dd 1514 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1515 val64 = 0x0304000102030400ULL;
5e25b9dd 1516 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1517 val64 = 0x0102030400010203ULL;
5e25b9dd 1518 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1519 val64 = 0x0400010203040001ULL;
5e25b9dd 1520 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1521 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1522 writeq(val64, &bar0->tx_w_round_robin_4);
1523 break;
1524 case 6:
b7c5678f 1525 val64 = 0x0001020304050001ULL;
5e25b9dd 1526 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1527 val64 = 0x0203040500010203ULL;
5e25b9dd 1528 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1529 val64 = 0x0405000102030405ULL;
5e25b9dd 1530 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1531 val64 = 0x0001020304050001ULL;
5e25b9dd 1532 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1533 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1534 writeq(val64, &bar0->tx_w_round_robin_4);
1535 break;
1536 case 7:
b7c5678f 1537 val64 = 0x0001020304050600ULL;
5e25b9dd 1538 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1539 val64 = 0x0102030405060001ULL;
5e25b9dd 1540 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1541 val64 = 0x0203040506000102ULL;
5e25b9dd 1542 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1543 val64 = 0x0304050600010203ULL;
5e25b9dd 1544 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1545 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1546 writeq(val64, &bar0->tx_w_round_robin_4);
1547 break;
1548 case 8:
b7c5678f 1549 val64 = 0x0001020304050607ULL;
5e25b9dd 1550 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1551 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1552 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1553 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1554 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1555 writeq(val64, &bar0->tx_w_round_robin_4);
1556 break;
1557 }
1558
b41477f3 1559 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1560 val64 = readq(&bar0->tx_fifo_partition_0);
1561 val64 |= (TX_FIFO_PARTITION_EN);
1562 writeq(val64, &bar0->tx_fifo_partition_0);
1563
5e25b9dd 1564 /* Filling the Rx round robin registers as per the
0425b46a
SH
1565 * number of Rings and steering based on QoS with
1566 * equal priority.
1567 */
5e25b9dd
K
1568 switch (config->rx_ring_num) {
1569 case 1:
0425b46a
SH
1570 val64 = 0x0;
1571 writeq(val64, &bar0->rx_w_round_robin_0);
1572 writeq(val64, &bar0->rx_w_round_robin_1);
1573 writeq(val64, &bar0->rx_w_round_robin_2);
1574 writeq(val64, &bar0->rx_w_round_robin_3);
1575 writeq(val64, &bar0->rx_w_round_robin_4);
1576
5e25b9dd
K
1577 val64 = 0x8080808080808080ULL;
1578 writeq(val64, &bar0->rts_qos_steering);
1579 break;
1580 case 2:
0425b46a 1581 val64 = 0x0001000100010001ULL;
5e25b9dd 1582 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1583 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1584 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1585 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1586 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1587 writeq(val64, &bar0->rx_w_round_robin_4);
1588
1589 val64 = 0x8080808040404040ULL;
1590 writeq(val64, &bar0->rts_qos_steering);
1591 break;
1592 case 3:
0425b46a 1593 val64 = 0x0001020001020001ULL;
5e25b9dd 1594 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1595 val64 = 0x0200010200010200ULL;
5e25b9dd 1596 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1597 val64 = 0x0102000102000102ULL;
5e25b9dd 1598 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1599 val64 = 0x0001020001020001ULL;
5e25b9dd 1600 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1601 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1602 writeq(val64, &bar0->rx_w_round_robin_4);
1603
1604 val64 = 0x8080804040402020ULL;
1605 writeq(val64, &bar0->rts_qos_steering);
1606 break;
1607 case 4:
0425b46a 1608 val64 = 0x0001020300010203ULL;
5e25b9dd 1609 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1610 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1611 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1612 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1613 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1614 writeq(val64, &bar0->rx_w_round_robin_4);
1615
1616 val64 = 0x8080404020201010ULL;
1617 writeq(val64, &bar0->rts_qos_steering);
1618 break;
1619 case 5:
0425b46a 1620 val64 = 0x0001020304000102ULL;
5e25b9dd 1621 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1622 val64 = 0x0304000102030400ULL;
5e25b9dd 1623 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1624 val64 = 0x0102030400010203ULL;
5e25b9dd 1625 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1626 val64 = 0x0400010203040001ULL;
5e25b9dd 1627 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1628 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1629 writeq(val64, &bar0->rx_w_round_robin_4);
1630
1631 val64 = 0x8080404020201008ULL;
1632 writeq(val64, &bar0->rts_qos_steering);
1633 break;
1634 case 6:
0425b46a 1635 val64 = 0x0001020304050001ULL;
5e25b9dd 1636 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1637 val64 = 0x0203040500010203ULL;
5e25b9dd 1638 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1639 val64 = 0x0405000102030405ULL;
5e25b9dd 1640 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1641 val64 = 0x0001020304050001ULL;
5e25b9dd 1642 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1643 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1644 writeq(val64, &bar0->rx_w_round_robin_4);
1645
1646 val64 = 0x8080404020100804ULL;
1647 writeq(val64, &bar0->rts_qos_steering);
1648 break;
1649 case 7:
0425b46a 1650 val64 = 0x0001020304050600ULL;
5e25b9dd 1651 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1652 val64 = 0x0102030405060001ULL;
5e25b9dd 1653 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1654 val64 = 0x0203040506000102ULL;
5e25b9dd 1655 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1656 val64 = 0x0304050600010203ULL;
5e25b9dd 1657 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1658 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1659 writeq(val64, &bar0->rx_w_round_robin_4);
1660
1661 val64 = 0x8080402010080402ULL;
1662 writeq(val64, &bar0->rts_qos_steering);
1663 break;
1664 case 8:
0425b46a 1665 val64 = 0x0001020304050607ULL;
5e25b9dd 1666 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1667 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1668 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1669 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1670 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1671 writeq(val64, &bar0->rx_w_round_robin_4);
1672
1673 val64 = 0x8040201008040201ULL;
1674 writeq(val64, &bar0->rts_qos_steering);
1675 break;
1676 }
1da177e4
LT
1677
1678 /* UDP Fix */
1679 val64 = 0;
20346722 1680 for (i = 0; i < 8; i++)
1da177e4
LT
1681 writeq(val64, &bar0->rts_frm_len_n[i]);
1682
5e25b9dd
K
1683 /* Set the default rts frame length for the rings configured */
1684 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1685 for (i = 0 ; i < config->rx_ring_num ; i++)
1686 writeq(val64, &bar0->rts_frm_len_n[i]);
1687
1688 /* Set the frame length for the configured rings
1689 * desired by the user
1690 */
1691 for (i = 0; i < config->rx_ring_num; i++) {
1692 /* If rts_frm_len[i] == 0 then it is assumed that user not
1693 * specified frame length steering.
1694 * If the user provides the frame length then program
1695 * the rts_frm_len register for those values or else
1696 * leave it as it is.
1697 */
1698 if (rts_frm_len[i] != 0) {
1699 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1700 &bar0->rts_frm_len_n[i]);
1701 }
1702 }
8a4bdbaa 1703
9fc93a41
SS
1704 /* Disable differentiated services steering logic */
1705 for (i = 0; i < 64; i++) {
1706 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1707 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1708 dev->name);
1709 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
9f74ffde 1710 return -ENODEV;
9fc93a41
SS
1711 }
1712 }
1713
20346722 1714 /* Program statistics memory */
1da177e4 1715 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1716
541ae68f
K
1717 if (nic->device_type == XFRAME_II_DEVICE) {
1718 val64 = STAT_BC(0x320);
1719 writeq(val64, &bar0->stat_byte_cnt);
1720 }
1721
20346722 1722 /*
1da177e4
LT
1723 * Initializing the sampling rate for the device to calculate the
1724 * bandwidth utilization.
1725 */
1726 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1727 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1728 writeq(val64, &bar0->mac_link_util);
1729
20346722
K
1730 /*
1731 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1732 * Scheme.
1733 */
1da177e4 1734
b7c5678f
RV
1735 /* Initialize TTI */
1736 if (SUCCESS != init_tti(nic, nic->last_link_state))
1737 return -ENODEV;
1da177e4 1738
8a4bdbaa
SS
1739 /* RTI Initialization */
1740 if (nic->device_type == XFRAME_II_DEVICE) {
541ae68f 1741 /*
8a4bdbaa
SS
1742 * Programmed to generate Apprx 500 Intrs per
1743 * second
1744 */
1745 int count = (nic->config.bus_speed * 125)/4;
1746 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1747 } else
1748 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1749 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1750 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1751 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1752
1753 writeq(val64, &bar0->rti_data1_mem);
1754
1755 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1756 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1757 if (nic->config.intr_type == MSI_X)
1758 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1759 RTI_DATA2_MEM_RX_UFC_D(0x40));
1760 else
1761 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1762 RTI_DATA2_MEM_RX_UFC_D(0x80));
1763 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1764
8a4bdbaa
SS
1765 for (i = 0; i < config->rx_ring_num; i++) {
1766 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1767 | RTI_CMD_MEM_OFFSET(i);
1768 writeq(val64, &bar0->rti_command_mem);
1da177e4 1769
8a4bdbaa
SS
1770 /*
1771 * Once the operation completes, the Strobe bit of the
1772 * command register will be reset. We poll for this
1773 * particular condition. We wait for a maximum of 500ms
1774 * for the operation to complete, if it's not complete
1775 * by then we return error.
1776 */
1777 time = 0;
1778 while (TRUE) {
1779 val64 = readq(&bar0->rti_command_mem);
1780 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1781 break;
b6e3f982 1782
8a4bdbaa
SS
1783 if (time > 10) {
1784 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1785 dev->name);
9f74ffde 1786 return -ENODEV;
b6e3f982 1787 }
8a4bdbaa
SS
1788 time++;
1789 msleep(50);
1da177e4 1790 }
1da177e4
LT
1791 }
1792
20346722
K
1793 /*
1794 * Initializing proper values as Pause threshold into all
1da177e4
LT
1795 * the 8 Queues on Rx side.
1796 */
1797 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1798 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1799
1800 /* Disable RMAC PAD STRIPPING */
509a2671 1801 add = &bar0->mac_cfg;
1da177e4
LT
1802 val64 = readq(&bar0->mac_cfg);
1803 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1804 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805 writel((u32) (val64), add);
1806 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807 writel((u32) (val64 >> 32), (add + 4));
1808 val64 = readq(&bar0->mac_cfg);
1809
7d3d0439
RA
1810 /* Enable FCS stripping by adapter */
1811 add = &bar0->mac_cfg;
1812 val64 = readq(&bar0->mac_cfg);
1813 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1814 if (nic->device_type == XFRAME_II_DEVICE)
1815 writeq(val64, &bar0->mac_cfg);
1816 else {
1817 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1818 writel((u32) (val64), add);
1819 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1820 writel((u32) (val64 >> 32), (add + 4));
1821 }
1822
20346722
K
1823 /*
1824 * Set the time value to be inserted in the pause frame
1da177e4
LT
1825 * generated by xena.
1826 */
1827 val64 = readq(&bar0->rmac_pause_cfg);
1828 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1829 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1830 writeq(val64, &bar0->rmac_pause_cfg);
1831
20346722 1832 /*
1da177e4
LT
1833 * Set the Threshold Limit for Generating the pause frame
1834 * If the amount of data in any Queue exceeds ratio of
1835 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1836 * pause frame is generated
1837 */
1838 val64 = 0;
1839 for (i = 0; i < 4; i++) {
1840 val64 |=
1841 (((u64) 0xFF00 | nic->mac_control.
1842 mc_pause_threshold_q0q3)
1843 << (i * 2 * 8));
1844 }
1845 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1846
1847 val64 = 0;
1848 for (i = 0; i < 4; i++) {
1849 val64 |=
1850 (((u64) 0xFF00 | nic->mac_control.
1851 mc_pause_threshold_q4q7)
1852 << (i * 2 * 8));
1853 }
1854 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1855
20346722
K
1856 /*
1857 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1858 * exceeded the limit pointed by shared_splits
1859 */
1860 val64 = readq(&bar0->pic_control);
1861 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1862 writeq(val64, &bar0->pic_control);
1863
863c11a9
AR
1864 if (nic->config.bus_speed == 266) {
1865 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1866 writeq(0x0, &bar0->read_retry_delay);
1867 writeq(0x0, &bar0->write_retry_delay);
1868 }
1869
541ae68f
K
1870 /*
1871 * Programming the Herc to split every write transaction
1872 * that does not start on an ADB to reduce disconnects.
1873 */
1874 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1875 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1876 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1877 writeq(val64, &bar0->misc_control);
1878 val64 = readq(&bar0->pic_control2);
b7b5a128 1879 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
863c11a9 1880 writeq(val64, &bar0->pic_control2);
541ae68f 1881 }
c92ca04b
AR
1882 if (strstr(nic->product_name, "CX4")) {
1883 val64 = TMAC_AVG_IPG(0x17);
1884 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d
K
1885 }
1886
1da177e4
LT
1887 return SUCCESS;
1888}
a371a07d
K
1889#define LINK_UP_DOWN_INTERRUPT 1
1890#define MAC_RMAC_ERR_TIMER 2
1891
1ee6dd77 1892static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d 1893{
eaae7f72 1894 if (nic->config.intr_type != INTA)
cc6e7c44 1895 return MAC_RMAC_ERR_TIMER;
a371a07d
K
1896 if (nic->device_type == XFRAME_II_DEVICE)
1897 return LINK_UP_DOWN_INTERRUPT;
1898 else
1899 return MAC_RMAC_ERR_TIMER;
1900}
8116f3cf 1901
9caab458
SS
1902/**
1903 * do_s2io_write_bits - update alarm bits in alarm register
1904 * @value: alarm bits
1905 * @flag: interrupt status
1906 * @addr: address value
1907 * Description: update alarm bits in alarm register
1908 * Return Value:
1909 * NONE.
1910 */
1911static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1912{
1913 u64 temp64;
1914
1915 temp64 = readq(addr);
1916
1917 if(flag == ENABLE_INTRS)
1918 temp64 &= ~((u64) value);
1919 else
1920 temp64 |= ((u64) value);
1921 writeq(temp64, addr);
1922}
1da177e4 1923
43b7c451 1924static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
9caab458
SS
1925{
1926 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1927 register u64 gen_int_mask = 0;
1928
1929 if (mask & TX_DMA_INTR) {
1930
1931 gen_int_mask |= TXDMA_INT_M;
1932
1933 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1934 TXDMA_PCC_INT | TXDMA_TTI_INT |
1935 TXDMA_LSO_INT | TXDMA_TPA_INT |
1936 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1937
1938 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1939 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1940 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1941 &bar0->pfc_err_mask);
1942
1943 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1944 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1945 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1946
1947 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1948 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1949 PCC_N_SERR | PCC_6_COF_OV_ERR |
1950 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1951 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1952 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1953
1954 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1955 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1956
1957 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1958 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1959 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1960 flag, &bar0->lso_err_mask);
1961
1962 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1963 flag, &bar0->tpa_err_mask);
1964
1965 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1966
1967 }
1968
1969 if (mask & TX_MAC_INTR) {
1970 gen_int_mask |= TXMAC_INT_M;
1971 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1972 &bar0->mac_int_mask);
1973 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1974 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1975 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1976 flag, &bar0->mac_tmac_err_mask);
1977 }
1978
1979 if (mask & TX_XGXS_INTR) {
1980 gen_int_mask |= TXXGXS_INT_M;
1981 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1982 &bar0->xgxs_int_mask);
1983 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1984 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1985 flag, &bar0->xgxs_txgxs_err_mask);
1986 }
1987
1988 if (mask & RX_DMA_INTR) {
1989 gen_int_mask |= RXDMA_INT_M;
1990 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1991 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1992 flag, &bar0->rxdma_int_mask);
1993 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1994 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1995 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1996 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1997 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1998 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1999 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
2000 &bar0->prc_pcix_err_mask);
2001 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
2002 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
2003 &bar0->rpa_err_mask);
2004 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
2005 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
2006 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2007 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2008 flag, &bar0->rda_err_mask);
2009 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2010 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2011 flag, &bar0->rti_err_mask);
2012 }
2013
2014 if (mask & RX_MAC_INTR) {
2015 gen_int_mask |= RXMAC_INT_M;
2016 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2017 &bar0->mac_int_mask);
2018 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2019 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2020 RMAC_DOUBLE_ECC_ERR |
2021 RMAC_LINK_STATE_CHANGE_INT,
2022 flag, &bar0->mac_rmac_err_mask);
2023 }
2024
2025 if (mask & RX_XGXS_INTR)
2026 {
2027 gen_int_mask |= RXXGXS_INT_M;
2028 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2029 &bar0->xgxs_int_mask);
2030 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2031 &bar0->xgxs_rxgxs_err_mask);
2032 }
2033
2034 if (mask & MC_INTR) {
2035 gen_int_mask |= MC_INT_M;
2036 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2037 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2038 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2039 &bar0->mc_err_mask);
2040 }
2041 nic->general_int_mask = gen_int_mask;
2042
2043 /* Remove this line when alarm interrupts are enabled */
2044 nic->general_int_mask = 0;
2045}
20346722
K
2046/**
2047 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1da177e4
LT
2048 * @nic: device private variable,
2049 * @mask: A mask indicating which Intr block must be modified and,
2050 * @flag: A flag indicating whether to enable or disable the Intrs.
2051 * Description: This function will either disable or enable the interrupts
20346722
K
2052 * depending on the flag argument. The mask argument can be used to
2053 * enable/disable any Intr block.
1da177e4
LT
2054 * Return Value: NONE.
2055 */
2056
2057static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2058{
1ee6dd77 2059 struct XENA_dev_config __iomem *bar0 = nic->bar0;
9caab458
SS
2060 register u64 temp64 = 0, intr_mask = 0;
2061
2062 intr_mask = nic->general_int_mask;
1da177e4
LT
2063
2064 /* Top level interrupt classification */
2065 /* PIC Interrupts */
9caab458 2066 if (mask & TX_PIC_INTR) {
1da177e4 2067 /* Enable PIC Intrs in the general intr mask register */
9caab458 2068 intr_mask |= TXPIC_INT_M;
1da177e4 2069 if (flag == ENABLE_INTRS) {
20346722 2070 /*
a371a07d 2071 * If Hercules adapter enable GPIO otherwise
b41477f3 2072 * disable all PCIX, Flash, MDIO, IIC and GPIO
20346722
K
2073 * interrupts for now.
2074 * TODO
1da177e4 2075 */
a371a07d
K
2076 if (s2io_link_fault_indication(nic) ==
2077 LINK_UP_DOWN_INTERRUPT ) {
9caab458
SS
2078 do_s2io_write_bits(PIC_INT_GPIO, flag,
2079 &bar0->pic_int_mask);
2080 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2081 &bar0->gpio_int_mask);
2082 } else
a371a07d 2083 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1da177e4 2084 } else if (flag == DISABLE_INTRS) {
20346722
K
2085 /*
2086 * Disable PIC Intrs in the general
2087 * intr mask register
1da177e4
LT
2088 */
2089 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1da177e4
LT
2090 }
2091 }
2092
1da177e4
LT
2093 /* Tx traffic interrupts */
2094 if (mask & TX_TRAFFIC_INTR) {
9caab458 2095 intr_mask |= TXTRAFFIC_INT_M;
1da177e4 2096 if (flag == ENABLE_INTRS) {
20346722 2097 /*
1da177e4 2098 * Enable all the Tx side interrupts
20346722 2099 * writing 0 Enables all 64 TX interrupt levels
1da177e4
LT
2100 */
2101 writeq(0x0, &bar0->tx_traffic_mask);
2102 } else if (flag == DISABLE_INTRS) {
20346722
K
2103 /*
2104 * Disable Tx Traffic Intrs in the general intr mask
1da177e4
LT
2105 * register.
2106 */
2107 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1da177e4
LT
2108 }
2109 }
2110
2111 /* Rx traffic interrupts */
2112 if (mask & RX_TRAFFIC_INTR) {
9caab458 2113 intr_mask |= RXTRAFFIC_INT_M;
1da177e4 2114 if (flag == ENABLE_INTRS) {
1da177e4
LT
2115 /* writing 0 Enables all 8 RX interrupt levels */
2116 writeq(0x0, &bar0->rx_traffic_mask);
2117 } else if (flag == DISABLE_INTRS) {
20346722
K
2118 /*
2119 * Disable Rx Traffic Intrs in the general intr mask
1da177e4
LT
2120 * register.
2121 */
2122 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1da177e4
LT
2123 }
2124 }
9caab458
SS
2125
2126 temp64 = readq(&bar0->general_int_mask);
2127 if (flag == ENABLE_INTRS)
2128 temp64 &= ~((u64) intr_mask);
2129 else
2130 temp64 = DISABLE_ALL_INTRS;
2131 writeq(temp64, &bar0->general_int_mask);
2132
2133 nic->general_int_mask = readq(&bar0->general_int_mask);
1da177e4
LT
2134}
2135
19a60522
SS
2136/**
2137 * verify_pcc_quiescent- Checks for PCC quiescent state
2138 * Return: 1 If PCC is quiescence
2139 * 0 If PCC is not quiescence
2140 */
1ee6dd77 2141static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2142{
19a60522 2143 int ret = 0, herc;
1ee6dd77 2144 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2145 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2146
19a60522 2147 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722
K
2148
2149 if (flag == FALSE) {
44c10138 2150 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2151 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2152 ret = 1;
19a60522
SS
2153 } else {
2154 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2155 ret = 1;
20346722
K
2156 }
2157 } else {
44c10138 2158 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2160 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2161 ret = 1;
5e25b9dd
K
2162 } else {
2163 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2164 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2165 ret = 1;
20346722
K
2166 }
2167 }
2168
2169 return ret;
2170}
2171/**
2172 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4 2173 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 2174 * on whether adapter enable bit was written or not the comparison
1da177e4
LT
2175 * differs and the calling function passes the input argument flag to
2176 * indicate this.
20346722 2177 * Return: 1 If xena is quiescence
1da177e4
LT
2178 * 0 If Xena is not quiescence
2179 */
2180
1ee6dd77 2181static int verify_xena_quiescence(struct s2io_nic *sp)
1da177e4 2182{
19a60522 2183 int mode;
1ee6dd77 2184 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522
SS
2185 u64 val64 = readq(&bar0->adapter_status);
2186 mode = s2io_verify_pci_mode(sp);
1da177e4 2187
19a60522
SS
2188 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2189 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2190 return 0;
2191 }
2192 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2193 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2194 return 0;
2195 }
2196 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2197 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2198 return 0;
2199 }
2200 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2201 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2202 return 0;
2203 }
2204 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2205 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2206 return 0;
2207 }
2208 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2209 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2210 return 0;
2211 }
2212 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2213 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2214 return 0;
2215 }
2216 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2217 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2218 return 0;
1da177e4
LT
2219 }
2220
19a60522
SS
2221 /*
2222 * In PCI 33 mode, the P_PLL is not used, and therefore,
2223 * the the P_PLL_LOCK bit in the adapter_status register will
2224 * not be asserted.
2225 */
2226 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2227 sp->device_type == XFRAME_II_DEVICE && mode !=
2228 PCI_MODE_PCI_33) {
2229 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2230 return 0;
2231 }
2232 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2233 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2234 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2235 return 0;
2236 }
2237 return 1;
1da177e4
LT
2238}
2239
2240/**
2241 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2242 * @sp: Pointer to device specifc structure
20346722 2243 * Description :
1da177e4
LT
2244 * New procedure to clear mac address reading problems on Alpha platforms
2245 *
2246 */
2247
1ee6dd77 2248static void fix_mac_address(struct s2io_nic * sp)
1da177e4 2249{
1ee6dd77 2250 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2251 u64 val64;
2252 int i = 0;
2253
2254 while (fix_mac[i] != END_SIGN) {
2255 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2256 udelay(10);
1da177e4
LT
2257 val64 = readq(&bar0->gpio_control);
2258 }
2259}
2260
2261/**
20346722 2262 * start_nic - Turns the device on
1da177e4 2263 * @nic : device private variable.
20346722
K
2264 * Description:
2265 * This function actually turns the device on. Before this function is
2266 * called,all Registers are configured from their reset states
2267 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
2268 * calling this function, the device interrupts are cleared and the NIC is
2269 * literally switched on by writing into the adapter control register.
20346722 2270 * Return Value:
1da177e4
LT
2271 * SUCCESS on success and -1 on failure.
2272 */
2273
2274static int start_nic(struct s2io_nic *nic)
2275{
1ee6dd77 2276 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
2277 struct net_device *dev = nic->dev;
2278 register u64 val64 = 0;
20346722 2279 u16 subid, i;
1ee6dd77 2280 struct mac_info *mac_control;
1da177e4
LT
2281 struct config_param *config;
2282
2283 mac_control = &nic->mac_control;
2284 config = &nic->config;
2285
2286 /* PRC Initialization and configuration */
2287 for (i = 0; i < config->rx_ring_num; i++) {
20346722 2288 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1da177e4
LT
2289 &bar0->prc_rxd0_n[i]);
2290
2291 val64 = readq(&bar0->prc_ctrl_n[i]);
da6971d8
AR
2292 if (nic->rxd_mode == RXD_MODE_1)
2293 val64 |= PRC_CTRL_RC_ENABLED;
2294 else
2295 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
863c11a9
AR
2296 if (nic->device_type == XFRAME_II_DEVICE)
2297 val64 |= PRC_CTRL_GROUP_READS;
2298 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2299 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
1da177e4
LT
2300 writeq(val64, &bar0->prc_ctrl_n[i]);
2301 }
2302
da6971d8
AR
2303 if (nic->rxd_mode == RXD_MODE_3B) {
2304 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2305 val64 = readq(&bar0->rx_pa_cfg);
2306 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2307 writeq(val64, &bar0->rx_pa_cfg);
2308 }
1da177e4 2309
926930b2
SS
2310 if (vlan_tag_strip == 0) {
2311 val64 = readq(&bar0->rx_pa_cfg);
2312 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2313 writeq(val64, &bar0->rx_pa_cfg);
2314 vlan_strip_flag = 0;
2315 }
2316
20346722 2317 /*
1da177e4
LT
2318 * Enabling MC-RLDRAM. After enabling the device, we timeout
2319 * for around 100ms, which is approximately the time required
2320 * for the device to be ready for operation.
2321 */
2322 val64 = readq(&bar0->mc_rldram_mrs);
2323 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2324 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2325 val64 = readq(&bar0->mc_rldram_mrs);
2326
20346722 2327 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
2328
2329 /* Enabling ECC Protection. */
2330 val64 = readq(&bar0->adapter_control);
2331 val64 &= ~ADAPTER_ECC_EN;
2332 writeq(val64, &bar0->adapter_control);
2333
20346722
K
2334 /*
2335 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
2336 * it.
2337 */
2338 val64 = readq(&bar0->adapter_status);
19a60522 2339 if (!verify_xena_quiescence(nic)) {
1da177e4
LT
2340 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2341 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2342 (unsigned long long) val64);
2343 return FAILURE;
2344 }
2345
20346722 2346 /*
1da177e4 2347 * With some switches, link might be already up at this point.
20346722
K
2348 * Because of this weird behavior, when we enable laser,
2349 * we may not get link. We need to handle this. We cannot
2350 * figure out which switch is misbehaving. So we are forced to
2351 * make a global change.
1da177e4
LT
2352 */
2353
2354 /* Enabling Laser. */
2355 val64 = readq(&bar0->adapter_control);
2356 val64 |= ADAPTER_EOI_TX_ON;
2357 writeq(val64, &bar0->adapter_control);
2358
c92ca04b
AR
2359 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2360 /*
2361 * Dont see link state interrupts initally on some switches,
2362 * so directly scheduling the link state task here.
2363 */
2364 schedule_work(&nic->set_link_task);
2365 }
1da177e4
LT
2366 /* SXE-002: Initialize link and activity LED */
2367 subid = nic->pdev->subsystem_device;
541ae68f
K
2368 if (((subid & 0xFF) >= 0x07) &&
2369 (nic->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
2370 val64 = readq(&bar0->gpio_control);
2371 val64 |= 0x0000800000000000ULL;
2372 writeq(val64, &bar0->gpio_control);
2373 val64 = 0x0411040400000000ULL;
509a2671 2374 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
2375 }
2376
1da177e4
LT
2377 return SUCCESS;
2378}
fed5eccd
AR
2379/**
2380 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2381 */
1ee6dd77
RB
2382static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2383 TxD *txdlp, int get_off)
fed5eccd 2384{
1ee6dd77 2385 struct s2io_nic *nic = fifo_data->nic;
fed5eccd 2386 struct sk_buff *skb;
1ee6dd77 2387 struct TxD *txds;
fed5eccd
AR
2388 u16 j, frg_cnt;
2389
2390 txds = txdlp;
2fda096d 2391 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
fed5eccd
AR
2392 pci_unmap_single(nic->pdev, (dma_addr_t)
2393 txds->Buffer_Pointer, sizeof(u64),
2394 PCI_DMA_TODEVICE);
2395 txds++;
2396 }
2397
2398 skb = (struct sk_buff *) ((unsigned long)
2399 txds->Host_Control);
2400 if (!skb) {
1ee6dd77 2401 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2402 return NULL;
2403 }
2404 pci_unmap_single(nic->pdev, (dma_addr_t)
2405 txds->Buffer_Pointer,
2406 skb->len - skb->data_len,
2407 PCI_DMA_TODEVICE);
2408 frg_cnt = skb_shinfo(skb)->nr_frags;
2409 if (frg_cnt) {
2410 txds++;
2411 for (j = 0; j < frg_cnt; j++, txds++) {
2412 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2413 if (!txds->Buffer_Pointer)
2414 break;
6aa20a22 2415 pci_unmap_page(nic->pdev, (dma_addr_t)
fed5eccd
AR
2416 txds->Buffer_Pointer,
2417 frag->size, PCI_DMA_TODEVICE);
2418 }
2419 }
1ee6dd77 2420 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2421 return(skb);
2422}
1da177e4 2423
20346722
K
2424/**
2425 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2426 * @nic : device private variable.
20346722 2427 * Description:
1da177e4 2428 * Free all queued Tx buffers.
20346722 2429 * Return Value: void
1da177e4
LT
2430*/
2431
2432static void free_tx_buffers(struct s2io_nic *nic)
2433{
2434 struct net_device *dev = nic->dev;
2435 struct sk_buff *skb;
1ee6dd77 2436 struct TxD *txdp;
1da177e4 2437 int i, j;
1ee6dd77 2438 struct mac_info *mac_control;
1da177e4 2439 struct config_param *config;
fed5eccd 2440 int cnt = 0;
1da177e4
LT
2441
2442 mac_control = &nic->mac_control;
2443 config = &nic->config;
2444
2445 for (i = 0; i < config->tx_fifo_num; i++) {
2fda096d
SR
2446 unsigned long flags;
2447 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
b35b3b49 2448 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
491976b2
SH
2449 txdp = (struct TxD *) \
2450 mac_control->fifos[i].list_info[j].list_virt_addr;
fed5eccd
AR
2451 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2452 if (skb) {
8a4bdbaa 2453 nic->mac_control.stats_info->sw_stat.mem_freed
491976b2 2454 += skb->truesize;
fed5eccd
AR
2455 dev_kfree_skb(skb);
2456 cnt++;
1da177e4 2457 }
1da177e4
LT
2458 }
2459 DBG_PRINT(INTR_DBG,
2460 "%s:forcibly freeing %d skbs on FIFO%d\n",
2461 dev->name, cnt, i);
20346722
K
2462 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2463 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2fda096d 2464 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
1da177e4
LT
2465 }
2466}
2467
20346722
K
2468/**
2469 * stop_nic - To stop the nic
1da177e4 2470 * @nic ; device private variable.
20346722
K
2471 * Description:
2472 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2473 * function does. This function is called to stop the device.
2474 * Return Value:
2475 * void.
2476 */
2477
2478static void stop_nic(struct s2io_nic *nic)
2479{
1ee6dd77 2480 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2481 register u64 val64 = 0;
5d3213cc 2482 u16 interruptible;
1ee6dd77 2483 struct mac_info *mac_control;
1da177e4
LT
2484 struct config_param *config;
2485
2486 mac_control = &nic->mac_control;
2487 config = &nic->config;
2488
2489 /* Disable all interrupts */
9caab458 2490 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2491 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2492 interruptible |= TX_PIC_INTR;
1da177e4
LT
2493 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2494
5d3213cc
AR
2495 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2496 val64 = readq(&bar0->adapter_control);
2497 val64 &= ~(ADAPTER_CNTL_EN);
2498 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2499}
2500
20346722
K
2501/**
2502 * fill_rx_buffers - Allocates the Rx side skbs
0425b46a 2503 * @ring_info: per ring structure
20346722 2504 * Description:
1da177e4
LT
2505 * The function allocates Rx side skbs and puts the physical
2506 * address of these buffers into the RxD buffer pointers, so that the NIC
2507 * can DMA the received frame into these locations.
2508 * The NIC supports 3 receive modes, viz
2509 * 1. single buffer,
2510 * 2. three buffer and
2511 * 3. Five buffer modes.
20346722
K
2512 * Each mode defines how many fragments the received frame will be split
2513 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
2514 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2515 * is split into 3 fragments. As of now only single buffer mode is
2516 * supported.
2517 * Return Value:
2518 * SUCCESS on success or an appropriate -ve value on failure.
2519 */
2520
0425b46a 2521static int fill_rx_buffers(struct ring_info *ring)
1da177e4 2522{
1da177e4 2523 struct sk_buff *skb;
1ee6dd77 2524 struct RxD_t *rxdp;
0425b46a 2525 int off, size, block_no, block_no1;
1da177e4 2526 u32 alloc_tab = 0;
20346722 2527 u32 alloc_cnt;
20346722 2528 u64 tmp;
1ee6dd77 2529 struct buffAdd *ba;
1ee6dd77 2530 struct RxD_t *first_rxdp = NULL;
363dc367 2531 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
0425b46a 2532 int rxd_index = 0;
6d517a27
VP
2533 struct RxD1 *rxdp1;
2534 struct RxD3 *rxdp3;
0425b46a 2535 struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
1da177e4 2536
0425b46a 2537 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
1da177e4 2538
0425b46a 2539 block_no1 = ring->rx_curr_get_info.block_index;
1da177e4 2540 while (alloc_tab < alloc_cnt) {
0425b46a 2541 block_no = ring->rx_curr_put_info.block_index;
1da177e4 2542
0425b46a
SH
2543 off = ring->rx_curr_put_info.offset;
2544
2545 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2546
2547 rxd_index = off + 1;
2548 if (block_no)
2549 rxd_index += (block_no * ring->rxd_count);
da6971d8 2550
7d2e3cb7 2551 if ((block_no == block_no1) &&
0425b46a
SH
2552 (off == ring->rx_curr_get_info.offset) &&
2553 (rxdp->Host_Control)) {
da6971d8 2554 DBG_PRINT(INTR_DBG, "%s: Get and Put",
0425b46a 2555 ring->dev->name);
1da177e4
LT
2556 DBG_PRINT(INTR_DBG, " info equated\n");
2557 goto end;
2558 }
0425b46a
SH
2559 if (off && (off == ring->rxd_count)) {
2560 ring->rx_curr_put_info.block_index++;
2561 if (ring->rx_curr_put_info.block_index ==
2562 ring->block_count)
2563 ring->rx_curr_put_info.block_index = 0;
2564 block_no = ring->rx_curr_put_info.block_index;
2565 off = 0;
2566 ring->rx_curr_put_info.offset = off;
2567 rxdp = ring->rx_blocks[block_no].block_virt_addr;
1da177e4 2568 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
0425b46a
SH
2569 ring->dev->name, rxdp);
2570
1da177e4 2571 }
c9fcbf47 2572
da6971d8 2573 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
0425b46a 2574 ((ring->rxd_mode == RXD_MODE_3B) &&
b7b5a128 2575 (rxdp->Control_2 & s2BIT(0)))) {
0425b46a 2576 ring->rx_curr_put_info.offset = off;
1da177e4
LT
2577 goto end;
2578 }
da6971d8 2579 /* calculate size of skb based on ring mode */
0425b46a 2580 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
da6971d8 2581 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
0425b46a 2582 if (ring->rxd_mode == RXD_MODE_1)
da6971d8 2583 size += NET_IP_ALIGN;
da6971d8 2584 else
0425b46a 2585 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
1da177e4 2586
da6971d8
AR
2587 /* allocate skb */
2588 skb = dev_alloc_skb(size);
2589 if(!skb) {
0425b46a 2590 DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
0c61ed5f 2591 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
303bcb4b
K
2592 if (first_rxdp) {
2593 wmb();
2594 first_rxdp->Control_1 |= RXD_OWN_XENA;
2595 }
0425b46a 2596 stats->mem_alloc_fail_cnt++;
7d2e3cb7 2597
da6971d8
AR
2598 return -ENOMEM ;
2599 }
0425b46a
SH
2600 stats->mem_allocated += skb->truesize;
2601
2602 if (ring->rxd_mode == RXD_MODE_1) {
da6971d8 2603 /* 1 buffer mode - normal operation mode */
6d517a27 2604 rxdp1 = (struct RxD1*)rxdp;
1ee6dd77 2605 memset(rxdp, 0, sizeof(struct RxD1));
da6971d8 2606 skb_reserve(skb, NET_IP_ALIGN);
6d517a27 2607 rxdp1->Buffer0_ptr = pci_map_single
0425b46a 2608 (ring->pdev, skb->data, size - NET_IP_ALIGN,
863c11a9 2609 PCI_DMA_FROMDEVICE);
64c42f69 2610 if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
491abf25
VP
2611 goto pci_map_failed;
2612
8a4bdbaa 2613 rxdp->Control_2 =
491976b2 2614 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
0425b46a
SH
2615 rxdp->Host_Control = (unsigned long) (skb);
2616 } else if (ring->rxd_mode == RXD_MODE_3B) {
da6971d8 2617 /*
6d517a27
VP
2618 * 2 buffer mode -
2619 * 2 buffer mode provides 128
da6971d8 2620 * byte aligned receive buffers.
da6971d8
AR
2621 */
2622
6d517a27 2623 rxdp3 = (struct RxD3*)rxdp;
491976b2 2624 /* save buffer pointers to avoid frequent dma mapping */
6d517a27
VP
2625 Buffer0_ptr = rxdp3->Buffer0_ptr;
2626 Buffer1_ptr = rxdp3->Buffer1_ptr;
1ee6dd77 2627 memset(rxdp, 0, sizeof(struct RxD3));
363dc367 2628 /* restore the buffer pointers for dma sync*/
6d517a27
VP
2629 rxdp3->Buffer0_ptr = Buffer0_ptr;
2630 rxdp3->Buffer1_ptr = Buffer1_ptr;
363dc367 2631
0425b46a 2632 ba = &ring->ba[block_no][off];
da6971d8
AR
2633 skb_reserve(skb, BUF0_LEN);
2634 tmp = (u64)(unsigned long) skb->data;
2635 tmp += ALIGN_SIZE;
2636 tmp &= ~ALIGN_SIZE;
2637 skb->data = (void *) (unsigned long)tmp;
27a884dc 2638 skb_reset_tail_pointer(skb);
da6971d8 2639
64c42f69 2640 /* AK: check is wrong. 0 can be valid dma address */
6d517a27
VP
2641 if (!(rxdp3->Buffer0_ptr))
2642 rxdp3->Buffer0_ptr =
0425b46a
SH
2643 pci_map_single(ring->pdev, ba->ba_0,
2644 BUF0_LEN, PCI_DMA_FROMDEVICE);
75c30b13 2645 else
0425b46a 2646 pci_dma_sync_single_for_device(ring->pdev,
6d517a27 2647 (dma_addr_t) rxdp3->Buffer0_ptr,
75c30b13 2648 BUF0_LEN, PCI_DMA_FROMDEVICE);
64c42f69 2649 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
491abf25
VP
2650 goto pci_map_failed;
2651
da6971d8 2652 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
0425b46a 2653 if (ring->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
2654 /* Two buffer mode */
2655
2656 /*
6aa20a22 2657 * Buffer2 will have L3/L4 header plus
da6971d8
AR
2658 * L4 payload
2659 */
6d517a27 2660 rxdp3->Buffer2_ptr = pci_map_single
0425b46a 2661 (ring->pdev, skb->data, ring->mtu + 4,
da6971d8
AR
2662 PCI_DMA_FROMDEVICE);
2663
64c42f69 2664 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
491abf25
VP
2665 goto pci_map_failed;
2666
64c42f69 2667 /* AK: check is wrong */
0425b46a
SH
2668 if (!rxdp3->Buffer1_ptr)
2669 rxdp3->Buffer1_ptr =
2670 pci_map_single(ring->pdev,
75c30b13
AR
2671 ba->ba_1, BUF1_LEN,
2672 PCI_DMA_FROMDEVICE);
0425b46a 2673
64c42f69 2674 if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
491abf25 2675 pci_unmap_single
0425b46a
SH
2676 (ring->pdev,
2677 (dma_addr_t)(unsigned long)
2678 skb->data,
2679 ring->mtu + 4,
491abf25
VP
2680 PCI_DMA_FROMDEVICE);
2681 goto pci_map_failed;
75c30b13 2682 }
da6971d8
AR
2683 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2684 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
0425b46a 2685 (ring->mtu + 4);
da6971d8 2686 }
b7b5a128 2687 rxdp->Control_2 |= s2BIT(0);
0425b46a 2688 rxdp->Host_Control = (unsigned long) (skb);
1da177e4 2689 }
303bcb4b
K
2690 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2691 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2692 off++;
0425b46a 2693 if (off == (ring->rxd_count + 1))
da6971d8 2694 off = 0;
0425b46a 2695 ring->rx_curr_put_info.offset = off;
20346722 2696
da6971d8 2697 rxdp->Control_2 |= SET_RXD_MARKER;
303bcb4b
K
2698 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2699 if (first_rxdp) {
2700 wmb();
2701 first_rxdp->Control_1 |= RXD_OWN_XENA;
2702 }
2703 first_rxdp = rxdp;
2704 }
0425b46a 2705 ring->rx_bufs_left += 1;
1da177e4
LT
2706 alloc_tab++;
2707 }
2708
2709 end:
303bcb4b
K
2710 /* Transfer ownership of first descriptor to adapter just before
2711 * exiting. Before that, use memory barrier so that ownership
2712 * and other fields are seen by adapter correctly.
2713 */
2714 if (first_rxdp) {
2715 wmb();
2716 first_rxdp->Control_1 |= RXD_OWN_XENA;
2717 }
2718
1da177e4 2719 return SUCCESS;
491abf25
VP
2720pci_map_failed:
2721 stats->pci_map_fail_cnt++;
2722 stats->mem_freed += skb->truesize;
2723 dev_kfree_skb_irq(skb);
2724 return -ENOMEM;
1da177e4
LT
2725}
2726
/*
 * free_rxd_blk - free every Rx buffer in one descriptor block of a ring.
 * @sp:      device private structure.
 * @ring_no: index of the ring whose block is being freed.
 * @blk:     index of the descriptor block within that ring.
 *
 * Walks all descriptors of the block; for each one that still owns an
 * skb (Host_Control != 0) the DMA mappings are released according to
 * the descriptor mode, the descriptor is cleared and the skb is freed.
 * Caller context: teardown path, not the Rx hot path.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;	/* NOTE(review): assigned below but never read */
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	mac_control = &sp->mac_control;
	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
				rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb) {
			/* Descriptor was never filled (or already freed) */
			continue;
		}
		if (sp->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: one mapping holds headers
			 * plus the full MTU payload.
			 */
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				dev->mtu +
				HEADER_ETHERNET_II_802_3_SIZE
				+ HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if(sp->rxd_mode == RXD_MODE_3B) {
			/* Two-buffer mode: unmap the Ethernet header
			 * buffer, the spare buffer and the payload
			 * buffer separately.
			 */
			rxdp3 = (struct RxD3*)rxdp;
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer1_ptr,
				BUF1_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				dev->mtu + 4,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2780
/**
 * free_rx_buffers - Frees all Rx buffers
 * @sp: device private variable.
 * Description:
 * This function will free all Rx buffers allocated by host.
 * It iterates over every ring, frees each descriptor block via
 * free_rxd_blk() and then resets the ring's get/put bookkeeping.
 * Return Value:
 * NONE.
 */

static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	/* NOTE(review): buf_cnt is never incremented, so the DBG_PRINT
	 * below always reports 0 buffers freed — stale debug output.
	 */
	int i, blk = 0, buf_cnt = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp,i,blk);

		/* Reset ring state so a subsequent fill starts clean */
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_bufs_left = 0;
		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}
2813
f61e0a35
SH
2814static int s2io_chk_rx_buffers(struct ring_info *ring)
2815{
2816 if (fill_rx_buffers(ring) == -ENOMEM) {
2817 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2818 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2819 }
2820 return 0;
2821}
2822
/**
 * s2io_poll_msix - per-ring NAPI poll handler (MSI-X mode)
 * @napi : pointer to the napi structure embedded in the ring.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * Number of packets processed this pass (<= budget).
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	struct config_param *config;
	struct mac_info *mac_control;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = dev->priv;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	config = &nic->config;
	mac_control = &nic->mac_control;

	/* Card may have been taken down while the poll was scheduled */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(ring);

	if (pkts_processed < budget_org) {
		/* Budget not exhausted: all pending work done, so leave
		 * polled mode and unmask this ring's MSI-X Rx vector.
		 */
		netif_rx_complete(dev, napi);
		/*Re Enable MSI-Rx Vector*/
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		/* read back to flush the posted write */
		val8 = readb(addr);
	}
	return pkts_processed;
}
/*
 * s2io_poll_inta - device-wide NAPI poll handler (legacy INTA mode).
 * @napi:   napi structure embedded in the nic private data.
 * @budget: maximum number of packets to process this pass.
 *
 * Services all Rx rings round-robin until the budget is spent; when
 * the budget is not exhausted, completes NAPI and unmasks the Rx
 * traffic interrupt. Returns the number of packets processed.
 */
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	struct ring_info *ring;
	struct net_device *dev = nic->dev;
	struct config_param *config;
	struct mac_info *mac_control;
	int pkts_processed = 0;
	int ring_pkts_processed, i;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	config = &nic->config;
	mac_control = &nic->mac_control;

	/* Card may have been taken down while the poll was scheduled */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	for (i = 0; i < config->rx_ring_num; i++) {
		ring = &mac_control->rings[i];
		ring_pkts_processed = rx_intr_handler(ring, budget);
		s2io_chk_rx_buffers(ring);
		pkts_processed += ring_pkts_processed;
		budget -= ring_pkts_processed;
		if (budget <= 0)
			break;
	}
	if (pkts_processed < budget_org) {
		netif_rx_complete(dev, napi);
		/* Re enable the Rx interrupts for the ring */
		writeq(0, &bar0->rx_traffic_mask);
		/* readl flushes the posted write before returning */
		readl(&bar0->rx_traffic_mask);
	}
	return pkts_processed;
}
20346722 2904
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* Nothing can be done if the PCI channel is down (e.g. EEH) */
	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(dev->irq);

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge all pending Rx/Tx traffic interrupts */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i], 0);

	/* Replenish Rx buffers; stop at the first allocation failure */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	enable_irq(dev->irq);
	return;
}
#endif
2957
/**
 * rx_intr_handler - Rx interrupt handler
 * @ring_data: per ring structure.
 * @budget: budget for napi processing (only honored when NAPI is on).
 * Description:
 * If the interrupt is because of a received frame or if the
 * receive ring contains fresh as yet un-processed frames,this function is
 * called. It picks out the RxD at which place the last Rx processing had
 * stopped and sends the skb to the OSM's Rx handler and then increments
 * the offset.
 * Return Value:
 * No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	/* Resume from where the previous invocation stopped */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Process descriptors the NIC has handed back to the host */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return 0;
		}
		/* Release the DMA mapping(s) according to descriptor mode */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				ring_data->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* Buffer0 (header) is only synced, Buffer2
			 * (payload) is fully unmapped.
			 */
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				ring_data->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		/* Wrap to the next block (and possibly back to block 0) */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return(napi_pkts);
}
20346722
K
3064
/**
 * tx_intr_handler - Transmit interrupt handler
 * @fifo_data : fifo whose completed descriptors are reclaimed
 * Description:
 * If an interrupt was raised to indicate DMA complete of the
 * Tx packet, this function is called. It identifies the last TxD
 * whose buffer was freed and frees all skbs whose data have already
 * DMA'ed into the NICs internal memory.
 * Return Value:
 * NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* trylock: if another context already reclaims this fifo,
	 * just let it finish the work.
	 */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Walk descriptors the NIC has finished with, up to the put index */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			/* Should not happen: descriptor owned by host
			 * but carries no skb. Drop the lock and bail.
			 */
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Descriptors were reclaimed: the queue may be restartable */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3164
/**
 * s2io_mdio_write - Function to write in to MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @value : data value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Issues an address transaction, then a write transaction, then a
 * read-back transaction through the mdio_control register; each
 * phase is started by setting MDIO_CTRL_START_TRANS and followed
 * by a fixed 100us delay. Return value: NONE.
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
	u64 val64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0)
		| MDIO_MDIO_DATA(value)
		| MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read-back transaction (result is not checked here) */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0)
		| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

}
3213
/**
 * s2io_mdio_read - Function to read from MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Issues an address transaction then a read transaction, then
 * extracts the 16-bit data field (bits 16..31) of mdio_control.
 * Return value: the 16-bit register value read.
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
		| MDIO_MMD_DEV_ADDR(mmd_type)
		| MDIO_MMS_PRT_ADDR(0x0)
		| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
3256/**
3257 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3258 * @counter : couter value to be updated
3259 * @flag : flag to indicate the status
3260 * @type : counter type
3261 * Description:
3262 * This function is to check the status of the xpak counters value
3263 * NONE
3264 */
3265
3266static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3267{
3268 u64 mask = 0x3;
3269 u64 val64;
3270 int i;
3271 for(i = 0; i <index; i++)
3272 mask = mask << 0x2;
3273
3274 if(flag > 0)
3275 {
3276 *counter = *counter + 1;
3277 val64 = *regs_stat & mask;
3278 val64 = val64 >> (index * 0x2);
3279 val64 = val64 + 1;
3280 if(val64 == 3)
3281 {
3282 switch(type)
3283 {
3284 case 1:
3285 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3286 "service. Excessive temperatures may "
3287 "result in premature transceiver "
3288 "failure \n");
3289 break;
3290 case 2:
3291 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3292 "service Excessive bias currents may "
3293 "indicate imminent laser diode "
3294 "failure \n");
3295 break;
3296 case 3:
3297 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3298 "service Excessive laser output "
3299 "power may saturate far-end "
3300 "receiver\n");
3301 break;
3302 default:
3303 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3304 "type \n");
3305 }
3306 val64 = 0x0;
3307 }
3308 val64 = val64 << (index * 0x2);
3309 *regs_stat = (*regs_stat & (~mask)) | (val64);
3310
3311 } else {
3312 *regs_stat = *regs_stat & (~mask);
3313 }
3314}
3315
/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value.
 * Verifies MDIO access, loads the transceiver DOM page, then reads
 * the alarm (0xA070) and warning (0xA074) flag registers and bumps
 * the corresponding software statistics. Return value: NONE.
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		/* All-ones/all-zeros read means the slave did not respond */
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm (3-strike tracked) */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (3-strike tracked) */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (3-strike tracked) */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3414
20346722 3415/**
1da177e4 3416 * wait_for_cmd_complete - waits for a command to complete.
20346722 3417 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3418 * s2io_nic structure.
20346722
K
3419 * Description: Function that waits for a command to Write into RMAC
3420 * ADDR DATA registers to be completed and returns either success or
3421 * error depending on whether the command was complete or not.
1da177e4
LT
3422 * Return value:
3423 * SUCCESS on success and FAILURE on failure.
3424 */
3425
9fc93a41
SS
3426static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3427 int bit_state)
1da177e4 3428{
9fc93a41 3429 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3430 u64 val64;
3431
9fc93a41
SS
3432 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3433 return FAILURE;
3434
3435 do {
c92ca04b 3436 val64 = readq(addr);
9fc93a41
SS
3437 if (bit_state == S2IO_BIT_RESET) {
3438 if (!(val64 & busy_bit)) {
3439 ret = SUCCESS;
3440 break;
3441 }
3442 } else {
3443 if (!(val64 & busy_bit)) {
3444 ret = SUCCESS;
3445 break;
3446 }
1da177e4 3447 }
c92ca04b
AR
3448
3449 if(in_interrupt())
9fc93a41 3450 mdelay(delay);
c92ca04b 3451 else
9fc93a41 3452 msleep(delay);
c92ca04b 3453
9fc93a41
SS
3454 if (++cnt >= 10)
3455 delay = 50;
3456 } while (cnt < 20);
1da177e4
LT
3457 return ret;
3458}
19a60522
SS
3459/*
3460 * check_pci_device_id - Checks if the device id is supported
3461 * @id : device id
3462 * Description: Function to check if the pci device id is supported by driver.
3463 * Return value: Actual device id if supported else PCI_ANY_ID
3464 */
3465static u16 check_pci_device_id(u16 id)
3466{
3467 switch (id) {
3468 case PCI_DEVICE_ID_HERC_WIN:
3469 case PCI_DEVICE_ID_HERC_UNI:
3470 return XFRAME_II_DEVICE;
3471 case PCI_DEVICE_ID_S2IO_UNI:
3472 case PCI_DEVICE_ID_S2IO_WIN:
3473 return XFRAME_I_DEVICE;
3474 default:
3475 return PCI_ANY_ID;
3476 }
3477}
1da177e4 3478
/**
 * s2io_reset - Resets the card.
 * @sp : private member of the device structure.
 * Description: Function to Reset the card. This function then also
 * restores the previously saved PCI configuration space registers as
 * the card reset also resets the configuration space. Selected software
 * statistics (link up/down, reset/memory/watchdog counts) survive the
 * reset; everything else in the stats block is cleared.
 * Return value:
 * void.
 */

static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
		  __FUNCTION__, sp->dev->name);

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 transceivers need extra settle time after reset */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		/* Re-read the device id to confirm config space is back */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
3595
3596/**
20346722
K
3597 * s2io_set_swapper - to set the swapper controle on the card
3598 * @sp : private member of the device structure,
1da177e4 3599 * pointer to the s2io_nic structure.
20346722 3600 * Description: Function to set the swapper control on the card
1da177e4
LT
3601 * correctly depending on the 'endianness' of the system.
3602 * Return value:
3603 * SUCCESS on success and FAILURE on failure.
3604 */
3605
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/*
		 * Reads do not come back in the expected byte order:
		 * probe the four candidate read-side swapper settings
		 * until the feedback register returns the magic pattern.
		 */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
				0x8100008181000081ULL,	/* FE=1, SE=0 */
				0x4200004242000042ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			/* No candidate produced a correct read. */
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				  (unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];	/* working read-side setting */
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/*
	 * Verify the write path: write a magic value to xmsi_address and
	 * read it back; on mismatch probe the write-side settings,
	 * OR-ed with the read-side setting found above.
	 */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
				0x0081810000818100ULL,	/* FE=1, SE=0 */
				0x0042420000424200ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the high swapper bits, then enable swapping per path. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3733
1ee6dd77 3734static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3735{
1ee6dd77 3736 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3737 u64 val64;
3738 int ret = 0, cnt = 0;
3739
3740 do {
3741 val64 = readq(&bar0->xmsi_access);
b7b5a128 3742 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3743 break;
3744 mdelay(1);
3745 cnt++;
3746 } while(cnt < 5);
3747 if (cnt == 5) {
3748 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3749 ret = 1;
3750 }
3751
3752 return ret;
3753}
3754
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i, msix_index;


	/* XFRAME I has no MSI-X support; nothing to restore. */
	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Slot 0 is the alarm vector; ring vectors occupy device
		 * table slots 8*(i-1)+1 (same layout as s2io_enable_msi_x).
		 */
		msix_index = (i) ? ((i-1) * 8 + 1): 0;
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/* s2BIT(15) kicks off the access; s2BIT(7) presumably
		 * marks it as a write (store_xmsi_data omits it for
		 * reads) -- confirm against the Xframe register spec.
		 */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
	}
}
3777
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	/* XFRAME I has no MSI-X support; nothing to save. */
	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Store and display */
	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Slot 0 is the alarm vector; ring vectors occupy device
		 * table slots 8*(i-1)+1 (same layout as s2io_enable_msi_x).
		 */
		msix_index = (i) ? ((i-1) * 8 + 1): 0;
		/* s2BIT(15) kicks off the access; no s2BIT(7) here,
		 * unlike the write access in restore_xmsi_data.
		 */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache entries with a non-zero address AND data. */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3804
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	/* Vector table handed to pci_enable_msix(). */
	nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (nic->num_entries * sizeof(struct msix_entry));

	memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));

	/* Driver-side bookkeeping parallel to nic->entries. */
	nic->s2io_entries =
		kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
			GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (nic->num_entries * sizeof(struct s2io_msix_entry));
	memset(nic->s2io_entries, 0,
		nic->num_entries * sizeof(struct s2io_msix_entry));

	/* Entry 0 is the alarm/fifo vector. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries map to device table slots 8*(i-1)+1. */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Rx ring to its MSI-X vector via rx_mat. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);	/* read back; result unused */

	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret) {
		/* NOTE(review): any failure (including a positive "fewer
		 * vectors available" return) is reported as -ENOMEM --
		 * confirm whether propagating ret would be preferable.
		 */
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct s2io_msix_entry));
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3891
8abc4d5b 3892/* Handle software interrupt used during MSI(X) test */
33390a70 3893static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3894{
3895 struct s2io_nic *sp = dev_id;
3896
3897 sp->msi_detected = 1;
3898 wake_up(&sp->msi_wait);
3899
3900 return IRQ_HANDLED;
3901}
3902
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Borrow MSI-X vector 1 and point it at the test handler. */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
		       sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head (&sp->msi_wait);
	sp->msi_detected = 0;

	/* Arm a one-shot scheduled interrupt routed to MSI vector 1,
	 * saving the register so it can be restored afterwards.
	 */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to 100 ms to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n", sp->dev->name,
			  pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the original scheduler control value. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
18b2b7bd
SH
3945
3946static void remove_msix_isr(struct s2io_nic *sp)
3947{
3948 int i;
3949 u16 msi_control;
3950
f61e0a35 3951 for (i = 0; i < sp->num_entries; i++) {
18b2b7bd
SH
3952 if (sp->s2io_entries[i].in_use ==
3953 MSIX_REGISTERED_SUCCESS) {
3954 int vector = sp->entries[i].vector;
3955 void *arg = sp->s2io_entries[i].arg;
3956 free_irq(vector, arg);
3957 }
3958 }
3959
3960 kfree(sp->entries);
3961 kfree(sp->s2io_entries);
3962 sp->entries = NULL;
3963 sp->s2io_entries = NULL;
3964
3965 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3966 msi_control &= 0xFFFE; /* Disable MSI */
3967 pci_write_config_word(sp->pdev, 0x42, msi_control);
3968
3969 pci_disable_msix(sp->pdev);
3970}
3971
3972static void remove_inta_isr(struct s2io_nic *sp)
3973{
3974 struct net_device *dev = sp->dev;
3975
3976 free_irq(sp->pdev->irq, dev);
3977}
3978
1da177e4
LT
3979/* ********************************************************* *
3980 * Functions defined below concern the OS part of the driver *
3981 * ********************************************************* */
3982
20346722 3983/**
1da177e4
LT
3984 * s2io_open - open entry point of the driver
3985 * @dev : pointer to the device structure.
3986 * Description:
3987 * This function is the open entry point of the driver. It mainly calls a
3988 * function to allocate Rx buffers and inserts them into the buffer
20346722 3989 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3990 * Return value:
3991 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3992 * file on failure.
3993 */
3994
static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	/* Program the station MAC address; tear the card back down on
	 * failure.
	 */
	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}
	s2io_start_all_tx_queue(sp);
	return 0;

hw_init_failed:
	/* Release MSI-X tables still attached to sp (presumably left
	 * over from the failed card-up -- confirm), accounting the
	 * freed memory.
	 */
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (sp->num_entries * sizeof(struct msix_entry));
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (sp->num_entries * sizeof(struct s2io_msix_entry));
		}
	}
	return err;
}
4039
4040/**
4041 * s2io_close -close entry point of the driver
4042 * @dev : device pointer.
4043 * Description:
4044 * This is the stop entry point of the driver. It needs to undo exactly
4045 * whatever was done by the open entry point,thus it's usually referred to
4046 * as the close function.Among other things this function mainly stops the
4047 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4048 * Return value:
4049 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4050 * file on failure.
4051 */
4052
static int s2io_close(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	struct config_param *config = &sp->config;
	u64 tmp64;
	int offset;

	/* Return if the device is already closed *
	 * Can happen when s2io_card_up failed in change_mtu *
	 */
	if (!is_s2io_card_up(sp))
		return 0;

	s2io_stop_all_tx_queue(sp);
	/* delete all populated mac entries */
	for (offset = 1; offset < config->max_mc_addr; offset++) {
		tmp64 = do_s2io_read_unicast_mc(sp, offset);
		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
			do_s2io_delete_unicast_mc(sp, tmp64);
	}

	s2io_card_down(sp);

	return 0;
}
4078
4079/**
 * s2io_xmit - Tx entry point of the driver
4081 * @skb : the socket buffer containing the Tx data.
4082 * @dev : device pointer.
4083 * Description :
4084 * This function is the Tx entry point of the driver. S2IO NIC supports
4085 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device cannot queue the packet, only the trans_start
 * variable will not be updated.
4088 * Return value:
4089 * 0 on success & 1 on failure.
4090 */
4091
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Empty skbs are dropped silently. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Drop packets while the card is resetting. */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	if (sp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		/* Default steering: hash unfragmented TCP/UDP over IPv4
		 * onto a fifo by source+dest port. UDP fifos use
		 * trylock and optional per-list interrupts.
		 */
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				/* Port fields are at the same offsets in
				 * tcphdr and udphdr, so the cast is used
				 * for both protocols.
				 */
				th = (struct tcphdr *)(((unsigned char *)ip) +
						ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
					[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		/* UDP path: do not spin; let the stack retry. */
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* Bail out if this queue has been stopped. */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16) fifo->tx_curr_put_info.offset;
	get_off = (u16) fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return 0;
	}

	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		/* UFO: descriptor 0 carries an 8-byte in-band header
		 * (the IPv6 fragment id); the payload starts at the
		 * next descriptor.
		 */
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					fifo->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(txdp->Buffer_Pointer))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(txdp->Buffer_Pointer))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the hardware. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer, wrapping at fifo_len+1. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return 0;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue, drop the skb. */
	stats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return 0;
}
4320
25fff88e
K
4321static void
4322s2io_alarm_handle(unsigned long data)
4323{
1ee6dd77 4324 struct s2io_nic *sp = (struct s2io_nic *)data;
8116f3cf 4325 struct net_device *dev = sp->dev;
25fff88e 4326
8116f3cf 4327 s2io_handle_errors(dev);
25fff88e
K
4328 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4329}
4330
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct net_device *dev = sp->dev;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Byte-wide write into xmsi_mask_reg for this ring's
		 * vector (byte 7 - ring_no), then hand the ring to NAPI.
		 * Ring 0 uses 0x7f rather than 0xff -- presumably to
		 * keep the alarm vector unmasked; confirm against the
		 * Xframe register layout.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		/* read back; result unused, presumably flushes the
		 * posted write */
		val8 = readb(addr);
		netif_rx_schedule(dev, &ring->napi);
	} else {
		/* Non-NAPI: drain the ring and refill Rx buffers now. */
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(ring);
	}

	return IRQ_HANDLED;
}
4358
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	/* Mask all interrupts while Tx completions are serviced. */
	writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	/* Service completions on every Tx fifo. */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&fifos[i]);

	/* Restore the saved interrupt mask; the readl result is unused,
	 * presumably flushing the posted write.
	 */
	writeq(sp->general_int_mask, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	return IRQ_HANDLED;
}
ac731ab6 4389
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			/* Second write additionally turns the LED on. */
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): result unused -- presumably a read to flush the
	 * posted mask writes above; confirm before removing.
	 */
	val64 = readq(&bar0->gpio_int_mask);
}
4450
8116f3cf
SS
4451/**
 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4453 * @value: alarm bits
4454 * @addr: address value
4455 * @cnt: counter variable
4456 * Description: Check for alarm and increment the counter
4457 * Return Value:
4458 * 1 - if alarm bit set
4459 * 0 - if alarm bit is not set
4460 */
43b7c451 4461static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
8116f3cf
SS
4462 unsigned long long *cnt)
4463{
4464 u64 val64;
4465 val64 = readq(addr);
4466 if ( val64 & value ) {
4467 writeq(val64, addr);
4468 (*cnt)++;
4469 return 1;
4470 }
4471 return 0;
4472
4473}
4474
4475/**
4476 * s2io_handle_errors - Xframe error indication handler
4477 * @nic: device private variable
4478 * Description: Handle alarms such as loss of link, single or
4479 * double ECC errors, critical and serious errors.
4480 * Return Value:
4481 * NONE
4482 */
4483static void s2io_handle_errors(void * dev_id)
4484{
4485 struct net_device *dev = (struct net_device *) dev_id;
4486 struct s2io_nic *sp = dev->priv;
4487 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4488 u64 temp64 = 0,val64=0;
4489 int i = 0;
4490
4491 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4492 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4493
92b84437 4494 if (!is_s2io_card_up(sp))
8116f3cf
SS
4495 return;
4496
4497 if (pci_channel_offline(sp->pdev))
4498 return;
4499
4500 memset(&sw_stat->ring_full_cnt, 0,
4501 sizeof(sw_stat->ring_full_cnt));
4502
4503 /* Handling the XPAK counters update */
4504 if(stats->xpak_timer_count < 72000) {
4505 /* waiting for an hour */
4506 stats->xpak_timer_count++;
4507 } else {
4508 s2io_updt_xpak_counter(dev);
4509 /* reset the count to zero */
4510 stats->xpak_timer_count = 0;
4511 }
4512
4513 /* Handling link status change error Intr */
4514 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4515 val64 = readq(&bar0->mac_rmac_err_reg);
4516 writeq(val64, &bar0->mac_rmac_err_reg);
4517 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4518 schedule_work(&sp->set_link_task);
4519 }
4520
4521 /* In case of a serious error, the device will be Reset. */
4522 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4523 &sw_stat->serious_err_cnt))
4524 goto reset;
4525
4526 /* Check for data parity error */
4527 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4528 &sw_stat->parity_err_cnt))
4529 goto reset;
4530
4531 /* Check for ring full counter */
4532 if (sp->device_type == XFRAME_II_DEVICE) {
4533 val64 = readq(&bar0->ring_bump_counter1);
4534 for (i=0; i<4; i++) {
4535 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4536 temp64 >>= 64 - ((i+1)*16);
4537 sw_stat->ring_full_cnt[i] += temp64;
4538 }
4539
4540 val64 = readq(&bar0->ring_bump_counter2);
4541 for (i=0; i<4; i++) {
4542 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4543 temp64 >>= 64 - ((i+1)*16);
4544 sw_stat->ring_full_cnt[i+4] += temp64;
4545 }
4546 }
4547
4548 val64 = readq(&bar0->txdma_int_status);
4549 /*check for pfc_err*/
4550 if (val64 & TXDMA_PFC_INT) {
4551 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4552 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4553 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4554 &sw_stat->pfc_err_cnt))
4555 goto reset;
4556 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4557 &sw_stat->pfc_err_cnt);
4558 }
4559
4560 /*check for tda_err*/
4561 if (val64 & TXDMA_TDA_INT) {
4562 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4563 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4564 &sw_stat->tda_err_cnt))
4565 goto reset;
4566 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4567 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4568 }
4569 /*check for pcc_err*/
4570 if (val64 & TXDMA_PCC_INT) {
4571 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4572 | PCC_N_SERR | PCC_6_COF_OV_ERR
4573 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4574 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4575 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4576 &sw_stat->pcc_err_cnt))
4577 goto reset;
4578 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4579 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4580 }
4581
4582 /*check for tti_err*/
4583 if (val64 & TXDMA_TTI_INT) {
4584 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4585 &sw_stat->tti_err_cnt))
4586 goto reset;
4587 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4588 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4589 }
4590
4591 /*check for lso_err*/
4592 if (val64 & TXDMA_LSO_INT) {
4593 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4594 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4595 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4596 goto reset;
4597 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4598 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4599 }
4600
4601 /*check for tpa_err*/
4602 if (val64 & TXDMA_TPA_INT) {
4603 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4604 &sw_stat->tpa_err_cnt))
4605 goto reset;
4606 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4607 &sw_stat->tpa_err_cnt);
4608 }
4609
4610 /*check for sm_err*/
4611 if (val64 & TXDMA_SM_INT) {
4612 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4613 &sw_stat->sm_err_cnt))
4614 goto reset;
4615 }
4616
4617 val64 = readq(&bar0->mac_int_status);
4618 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4619 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4620 &bar0->mac_tmac_err_reg,
4621 &sw_stat->mac_tmac_err_cnt))
4622 goto reset;
4623 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4624 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4625 &bar0->mac_tmac_err_reg,
4626 &sw_stat->mac_tmac_err_cnt);
4627 }
4628
4629 val64 = readq(&bar0->xgxs_int_status);
4630 if (val64 & XGXS_INT_STATUS_TXGXS) {
4631 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4632 &bar0->xgxs_txgxs_err_reg,
4633 &sw_stat->xgxs_txgxs_err_cnt))
4634 goto reset;
4635 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4636 &bar0->xgxs_txgxs_err_reg,
4637 &sw_stat->xgxs_txgxs_err_cnt);
4638 }
4639
4640 val64 = readq(&bar0->rxdma_int_status);
4641 if (val64 & RXDMA_INT_RC_INT_M) {
4642 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4643 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4644 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4645 goto reset;
4646 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4647 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4648 &sw_stat->rc_err_cnt);
4649 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4650 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4651 &sw_stat->prc_pcix_err_cnt))
4652 goto reset;
4653 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4654 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4655 &sw_stat->prc_pcix_err_cnt);
4656 }
4657
4658 if (val64 & RXDMA_INT_RPA_INT_M) {
4659 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4660 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4661 goto reset;
4662 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4663 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4664 }
4665
4666 if (val64 & RXDMA_INT_RDA_INT_M) {
4667 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4668 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4669 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4670 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4671 goto reset;
4672 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4673 | RDA_MISC_ERR | RDA_PCIX_ERR,
4674 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4675 }
4676
4677 if (val64 & RXDMA_INT_RTI_INT_M) {
4678 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4679 &sw_stat->rti_err_cnt))
4680 goto reset;
4681 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4682 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4683 }
4684
4685 val64 = readq(&bar0->mac_int_status);
4686 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4687 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4688 &bar0->mac_rmac_err_reg,
4689 &sw_stat->mac_rmac_err_cnt))
4690 goto reset;
4691 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4692 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4693 &sw_stat->mac_rmac_err_cnt);
4694 }
4695
4696 val64 = readq(&bar0->xgxs_int_status);
4697 if (val64 & XGXS_INT_STATUS_RXGXS) {
4698 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4699 &bar0->xgxs_rxgxs_err_reg,
4700 &sw_stat->xgxs_rxgxs_err_cnt))
4701 goto reset;
4702 }
4703
4704 val64 = readq(&bar0->mc_int_status);
4705 if(val64 & MC_INT_STATUS_MC_INT) {
4706 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4707 &sw_stat->mc_err_cnt))
4708 goto reset;
4709
4710 /* Handling Ecc errors */
4711 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4712 writeq(val64, &bar0->mc_err_reg);
4713 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4714 sw_stat->double_ecc_errs++;
4715 if (sp->device_type != XFRAME_II_DEVICE) {
4716 /*
4717 * Reset XframeI only if critical error
4718 */
4719 if (val64 &
4720 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4721 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4722 goto reset;
4723 }
4724 } else
4725 sw_stat->single_ecc_errs++;
4726 }
4727 }
4728 return;
4729
4730reset:
3a3d5756 4731 s2io_stop_all_tx_queue(sp);
8116f3cf
SS
4732 schedule_work(&sp->rst_timer_task);
4733 sw_stat->soft_reset_cnt++;
4734 return;
4735}
4736
1da177e4
LT
/**
 * s2io_isr - ISR handler of the device .
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the dev structure of the NIC.
 * Description: This function is the ISR handler of the device. It
 * identifies the reason for the interrupt and calls the relevant
 * service routines. As a contingency measure, this ISR allocates the
 * recv buffers, if their numbers are below the panic value which is
 * presently set to 25% of the original number of rcv buffers allocated.
 * Return value:
 *  IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *  IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	/* Ignore spurious interrupts while the card is down/resetting */
	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* All 1's read back: adapter is gone/hung.
		 * Nothing much can be done. Get out */
		return IRQ_HANDLED;
	}

	if (reason & (GEN_INTR_RXTRAFFIC |
		GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
	{
		/* Mask all interrupts while we service this one */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Hand Rx work to the NAPI poll routine;
				 * leave Rx masked until poll re-enables it. */
				netif_rx_schedule(dev, &sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				/* flush posted write */
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++)
				rx_intr_handler(&mac_control->rings[i], 0);
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++)
				s2io_chk_rx_buffers(&mac_control->rings[i]);
		}
		/* Restore the interrupt mask and flush the write */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	}
	else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4843
7ba013ac
K
/**
 * s2io_updt_stats - request an immediate hardware statistics update
 * @sp: private member of the device structure (the s2io_nic).
 * Description:
 * Programs a one-shot statistics transfer on the adapter and polls
 * until the hardware clears the strobe bit, giving up after 5 polls
 * of 100us each. Does nothing if the card is not up.
 */
static void s2io_updt_stats(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt = 0;

	if (is_s2io_card_up(sp)) {
		/* Apprx 30us on a 133 MHz bus */
		val64 = SET_UPDT_CLICKS(10) |
			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
		writeq(val64, &bar0->stat_cfg);
		do {
			udelay(100);
			val64 = readq(&bar0->stat_cfg);
			/* bit 0 clears when the one-shot update completes */
			if (!(val64 & s2BIT(0)))
				break;
			cnt++;
			if (cnt == 5)
				break; /* Updt failed */
		} while(1);
	}
}
4869
1da177e4 4870/**
20346722 4871 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4872 * @dev : pointer to the device structure.
4873 * Description:
20346722 4874 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4875 * structure and returns a pointer to the same.
4876 * Return value:
4877 * pointer to the updated net_device_stats structure.
4878 */
4879
ac1f60db 4880static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4881{
1ee6dd77
RB
4882 struct s2io_nic *sp = dev->priv;
4883 struct mac_info *mac_control;
1da177e4 4884 struct config_param *config;
0425b46a 4885 int i;
1da177e4 4886
20346722 4887
1da177e4
LT
4888 mac_control = &sp->mac_control;
4889 config = &sp->config;
4890
7ba013ac
K
4891 /* Configure Stats for immediate updt */
4892 s2io_updt_stats(sp);
4893
4894 sp->stats.tx_packets =
4895 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722
K
4896 sp->stats.tx_errors =
4897 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4898 sp->stats.rx_errors =
ee705dba 4899 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
20346722
K
4900 sp->stats.multicast =
4901 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 4902 sp->stats.rx_length_errors =
ee705dba 4903 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4 4904
0425b46a
SH
4905 /* collect per-ring rx_packets and rx_bytes */
4906 sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4907 for (i = 0; i < config->rx_ring_num; i++) {
4908 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4909 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4910 }
4911
1da177e4
LT
4912 return (&sp->stats);
4913}
4914
/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine if multicast addresses must be enabled, or if promiscuous mode
 * is to be disabled etc.
 * Return value:
 * void.
 */

static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct dev_mc_list *mclist;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	/* multi_mac/mask select the "accept all multicast" CAM entry */
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
	    0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses: program the all-multi
		 * filter into the last CAM slot. */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
			S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
			S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode.  mac_cfg is a keyed
		 * 64-bit register written as two 32-bit halves; the key
		 * must be rewritten before each half. */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* In promiscuous mode, stop stripping VLAN tags unless the
		 * module parameter forces stripping on. */
		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
			  dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && dev->mc_count) {
		if (dev->mc_count >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "can be added, please enable ");
			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = dev->mc_count;

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
		     i++, mclist = mclist->next) {
			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
			       ETH_ALEN);
			mac_addr = 0;
			/* Pack the 6-byte address into a u64, MSB first */
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= mclist->dmi_addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			    RMAC_ADDR_CMD_MEM_OFFSET
			    (i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG, "%s: Adding ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
				return;
			}
		}
	}
}
5092
faa4f796
SH
5093/* read from CAM unicast & multicast addresses and store it in
5094 * def_mac_addr structure
5095 */
5096void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5097{
5098 int offset;
5099 u64 mac_addr = 0x0;
5100 struct config_param *config = &sp->config;
5101
5102 /* store unicast & multicast mac addresses */
5103 for (offset = 0; offset < config->max_mc_addr; offset++) {
5104 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5105 /* if read fails disable the entry */
5106 if (mac_addr == FAILURE)
5107 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5108 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5109 }
5110}
5111
5112/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5113static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5114{
5115 int offset;
5116 struct config_param *config = &sp->config;
5117 /* restore unicast mac address */
5118 for (offset = 0; offset < config->max_mac_addr; offset++)
5119 do_s2io_prog_unicast(sp->dev,
5120 sp->def_mac_addr[offset].mac_addr);
5121
5122 /* restore multicast mac address */
5123 for (offset = config->mc_start_offset;
5124 offset < config->max_mc_addr; offset++)
5125 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5126}
5127
/* add a multicast MAC address to CAM.
 * Packs the 6-byte address into a u64 (MSB first), skips the all-zero
 * and disabled sentinel values, and programs the address into the first
 * free multicast CAM slot unless it is already present.
 * Returns SUCCESS, or FAILURE when the CAM is full or programming fails.
 */
static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
{
	int i;
	u64 mac_addr = 0;
	struct config_param *config = &sp->config;

	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
	}
	/* all-zero and disable-sentinel addresses are silently accepted */
	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
		return SUCCESS;

	/* check if the multicast mac already preset in CAM */
	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
		u64 tmp64;
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr)
			return SUCCESS;
	}
	if (i == config->max_mc_addr) {
		DBG_PRINT(ERR_DBG,
			"CAM full no space left for multicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);

	return (do_s2io_add_mac(sp, mac_addr, i));
}
5162
/* add MAC address to CAM.
 * Writes @addr (packed u64) into CAM slot @off via the rmac_addr
 * command memory and waits for the strobe to complete.
 * Returns SUCCESS, or FAILURE if the command does not complete.
 */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
		&bar0->rmac_addr_data0_mem);

	val64 =
		RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
		S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}
faa4f796
SH
/* deletes a specified unicast/multicast mac entry from CAM.
 * Scans slots starting at offset 1 (slot 0 is presumably the primary
 * station address and is never deleted — TODO confirm) for @addr; on a
 * match the slot is overwritten with the disable sentinel and the
 * shadow table is refreshed from the CAM.
 * Returns SUCCESS on deletion, FAILURE if @addr is not found or the
 * disable write fails.
 */
static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
{
	int offset;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
	struct config_param *config = &sp->config;

	for (offset = 1;
		offset < config->max_mc_addr; offset++) {
		tmp64 = do_s2io_read_unicast_mc(sp, offset);
		if (tmp64 == addr) {
			/* disable the entry by writing 0xffffffffffffULL */
			if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
				return FAILURE;
			/* store the new mac list from CAM */
			do_s2io_store_unicast_mc(sp);
			return SUCCESS;
		}
	}
	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
			(unsigned long long)addr);
	return FAILURE;
}
5209
/* read mac entries from CAM.
 * Issues a read command for CAM slot @offset and returns the 48-bit
 * address right-shifted out of the data register's top bytes.
 * Returns FAILURE if the command does not complete. NOTE(review):
 * FAILURE is an in-band sentinel and could in principle collide with a
 * valid returned value — callers treat it as "read failed".
 */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64 = 0xffffffffffff0000ULL, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	val64 =
		RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
		S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	/* address occupies the upper 48 bits of the data register */
	return (tmp64 >> 16);
}
2fd37688
SS
5232
/**
 * s2io_set_mac_addr - driver entry point to change the station MAC
 * @dev: network device whose address is being changed
 * @p: struct sockaddr carrying the requested hardware address
 *
 * Validates the requested address, copies it into dev->dev_addr and
 * programs it into the adapter CAM via do_s2io_prog_unicast().
 * Returns -EINVAL for an invalid address, otherwise the result of
 * do_s2io_prog_unicast().
 */

static int s2io_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* store the MAC address in CAM */
	return (do_s2io_prog_unicast(dev, dev->dev_addr));
}
/**
 * do_s2io_prog_unicast - Programs the Xframe mac address
 * @dev : pointer to the device structure.
 * @addr: a uchar pointer to the new mac address which is to be set.
 * Description : This procedure will program the Xframe to receive
 * frames with new Mac Address
 * Return value: SUCCESS on success and an appropriate (-)ve integer
 * as defined in errno.h file on failure.
 */

static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
	struct s2io_nic *sp = dev->priv;
	register u64 mac_addr = 0, perm_addr = 0;
	int i;
	u64 tmp64;
	struct config_param *config = &sp->config;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		perm_addr <<= 8;
		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	/* check if the dev_addr is different than perm_addr */
	if (mac_addr == perm_addr)
		return SUCCESS;

	/* check if the mac already preset in CAM */
	for (i = 1; i < config->max_mac_addr; i++) {
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr) {
			DBG_PRINT(INFO_DBG,
				"MAC addr:0x%llx already present in CAM\n",
				(unsigned long long)mac_addr);
			return SUCCESS;
		}
	}
	if (i == config->max_mac_addr) {
		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);
	/* program the first free (or matching-empty) CAM slot found above */
	return (do_s2io_add_mac(sp, mac_addr, i));
}
5304
/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @dev: pointer to the device structure (private data is the s2io_nic).
 * @info: pointer to the structure with parameters given by ethtool to set
 * link information.
 * Description:
 * The function sets different link parameters provided by the user onto
 * the NIC. Only the fixed 10Gb/full-duplex/no-autoneg configuration is
 * accepted; any "change" simply bounces the interface (close + open).
 * Return value:
 * 0 on success, -EINVAL for any unsupported setting.
 */

static int s2io_ethtool_sset(struct net_device *dev,
			     struct ethtool_cmd *info)
{
	struct s2io_nic *sp = dev->priv;
	if ((info->autoneg == AUTONEG_ENABLE) ||
	    (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
		return -EINVAL;
	else {
		/* restart the interface to (re)apply the fixed link setup */
		s2io_close(sp->dev);
		s2io_open(sp->dev);
	}

	return 0;
}
5331
/**
 * s2io_ethtool_gset - Return link specific information.
 * @dev: pointer to the device structure (private data is the s2io_nic).
 * @info : pointer to the structure with parameters given by ethtool
 * to return link information.
 * Description:
 * Returns link specific information like speed, duplex etc.. to ethtool.
 * Speed/duplex are reported as unknown (-1) when the carrier is down.
 * Return value :
 * return 0 on success.
 */

static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
	struct s2io_nic *sp = dev->priv;
	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	/* NOTE(review): advertising is filled with SUPPORTED_* constants;
	 * ADVERTISED_* would be the matching ethtool names (same bit
	 * values) — confirm against linux/ethtool.h before changing. */
	info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->port = PORT_FIBRE;

	/* info->transceiver */
	info->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(sp->dev)) {
		info->speed = 10000;
		info->duplex = DUPLEX_FULL;
	} else {
		info->speed = -1;
		info->duplex = -1;
	}

	info->autoneg = AUTONEG_DISABLE;
	return 0;
}
5365
/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @dev: pointer to the device structure (private data is the s2io_nic).
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
 * The source strings here are short compile-time constants, so the
 * strncpy calls always NUL-terminate in practice.
 * Return value:
 * void
 */

static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	struct s2io_nic *sp = dev->priv;

	strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strncpy(info->version, s2io_driver_version, sizeof(info->version));
	strncpy(info->fw_version, "", sizeof(info->fw_version));
	strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
	info->regdump_len = XENA_REG_SPACE;
	info->eedump_len = XENA_EEPROM_SPACE;
}
5390
5391/**
5392 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 5393 * @sp: private member of the device structure, which is a pointer to the
1da177e4 5394 * s2io_nic structure.
20346722 5395 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
5396 * dumping the registers.
5397 * @reg_space: The input argumnet into which all the registers are dumped.
5398 * Description:
5399 * Dumps the entire register space of xFrame NIC into the user given
5400 * buffer area.
5401 * Return value :
5402 * void .
5403*/
5404
5405static void s2io_ethtool_gregs(struct net_device *dev,
5406 struct ethtool_regs *regs, void *space)
5407{
5408 int i;
5409 u64 reg;
5410 u8 *reg_space = (u8 *) space;
1ee6dd77 5411 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5412
5413 regs->len = XENA_REG_SPACE;
5414 regs->version = sp->pdev->subsystem_device;
5415
5416 for (i = 0; i < regs->len; i += 8) {
5417 reg = readq(sp->bar0 + i);
5418 memcpy((reg_space + i), &reg, 8);
5419 }
5420}
5421
/**
 * s2io_phy_id - timer function that alternates adapter LED.
 * @data : address of the private member of the device structure, which
 * is a pointer to the s2io_nic structure, provided as an u32.
 * Description: This is actually the timer function that alternates the
 * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
 * once every second. Xframe II (and Xframe I with subsystem id >= 0x07)
 * toggle a GPIO bit instead of the adapter-control LED bit.
 */
static void s2io_phy_id(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *) data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	if ((sp->device_type == XFRAME_II_DEVICE) ||
		((subid & 0xFF) >= 0x07)) {
		/* newer boards drive the LED through GPIO 0 */
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;
		writeq(val64, &bar0->gpio_control);
	} else {
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
	}

	/* re-arm for the next half-second toggle */
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
5452
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @dev: pointer to the device structure (private data is the s2io_nic).
 * @data : requested blink duration in seconds (0 means a default
 * MAX_FLICKER_TIME duration).
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success, -EFAULT if the adapter link is down on
 * boards that require it.
 */

static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* save GPIO state so it can be restored after blinking */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* lazily initialize the blink timer on first use */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* sleep while s2io_phy_id toggles the LED in the background */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5505
0cec35eb
SH
5506static void s2io_ethtool_gringparam(struct net_device *dev,
5507 struct ethtool_ringparam *ering)
5508{
5509 struct s2io_nic *sp = dev->priv;
5510 int i,tx_desc_count=0,rx_desc_count=0;
5511
5512 if (sp->rxd_mode == RXD_MODE_1)
5513 ering->rx_max_pending = MAX_RX_DESC_1;
5514 else if (sp->rxd_mode == RXD_MODE_3B)
5515 ering->rx_max_pending = MAX_RX_DESC_2;
0cec35eb
SH
5516
5517 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5518 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
0cec35eb 5519 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
8a4bdbaa 5520
0cec35eb
SH
5521 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5522 ering->tx_pending = tx_desc_count;
5523 rx_desc_count = 0;
8a4bdbaa 5524 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
0cec35eb 5525 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
b6627672 5526
0cec35eb
SH
5527 ering->rx_pending = rx_desc_count;
5528
5529 ering->rx_mini_max_pending = 0;
5530 ering->rx_mini_pending = 0;
5531 if(sp->rxd_mode == RXD_MODE_1)
5532 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5533 else if (sp->rxd_mode == RXD_MODE_3B)
5534 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5535 ering->rx_jumbo_pending = rx_desc_count;
5536}
5537
1da177e4
LT
5538/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722
K
5540 * @sp : private member of the device structure, which is a pointer to the
5541 * s2io_nic structure.
1da177e4
LT
5542 * @ep : pointer to the structure with pause parameters given by ethtool.
5543 * Description:
5544 * Returns the Pause frame generation and reception capability of the NIC.
5545 * Return value:
5546 * void
5547 */
5548static void s2io_ethtool_getpause_data(struct net_device *dev,
5549 struct ethtool_pauseparam *ep)
5550{
5551 u64 val64;
1ee6dd77
RB
5552 struct s2io_nic *sp = dev->priv;
5553 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5554
5555 val64 = readq(&bar0->rmac_pause_cfg);
5556 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5557 ep->tx_pause = TRUE;
5558 if (val64 & RMAC_PAUSE_RX_ENABLE)
5559 ep->rx_pause = TRUE;
5560 ep->autoneg = FALSE;
5561}
5562
5563/**
5564 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5565 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5566 * s2io_nic structure.
5567 * @ep : pointer to the structure with pause parameters given by ethtool.
5568 * Description:
5569 * It can be used to set or reset Pause frame generation or reception
5570 * support of the NIC.
5571 * Return value:
5572 * int, returns 0 on Success
5573 */
5574
5575static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 5576 struct ethtool_pauseparam *ep)
1da177e4
LT
5577{
5578 u64 val64;
1ee6dd77
RB
5579 struct s2io_nic *sp = dev->priv;
5580 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5581
5582 val64 = readq(&bar0->rmac_pause_cfg);
5583 if (ep->tx_pause)
5584 val64 |= RMAC_PAUSE_GEN_ENABLE;
5585 else
5586 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5587 if (ep->rx_pause)
5588 val64 |= RMAC_PAUSE_RX_ENABLE;
5589 else
5590 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5591 writeq(val64, &bar0->rmac_pause_cfg);
5592 return 0;
5593}
5594
5595/**
5596 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 5597 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5598 * s2io_nic structure.
5599 * @off : offset at which the data must be written
5600 * @data : Its an output parameter where the data read at the given
20346722 5601 * offset is stored.
1da177e4 5602 * Description:
20346722 5603 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
5604 * read data.
5605 * NOTE: Will allow to read only part of the EEPROM visible through the
5606 * I2C bus.
5607 * Return value:
5608 * -1 on failure and 0 on success.
5609 */
5610
#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: the EEPROM is reached through the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times, 50 ms apart, for transaction end. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: the EEPROM is reached through the SPI controller. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Same 5 x 50 ms polling; NACK means the device refused. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				/* Only the low 3 bytes carry EEPROM data. */
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5661
5662/**
5663 * write_eeprom - actually writes the relevant part of the data value.
5664 * @sp : private member of the device structure, which is a pointer to the
5665 * s2io_nic structure.
5666 * @off : offset at which the data must be written
5667 * @data : The data that is to be written
20346722 5668 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
5669 * the Eeprom. (max of 3)
5670 * Description:
5671 * Actually writes the relevant part of the data value into the Eeprom
5672 * through the I2C bus.
5673 * Return value:
5674 * 0 on success, -1 on failure.
5675 */
5676
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write goes through the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times, 50 ms apart; a NACK at completion
		 * means the write was rejected and ret stays -1. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write goes through the SPI controller. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* A byte count of 8 is encoded as 0 in the SPI command. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Same polling scheme as the read path. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
1ee6dd77 5726static void s2io_vpd_read(struct s2io_nic *nic)
9dc737a7 5727{
b41477f3
AR
5728 u8 *vpd_data;
5729 u8 data;
9dc737a7
AR
5730 int i=0, cnt, fail = 0;
5731 int vpd_addr = 0x80;
5732
5733 if (nic->device_type == XFRAME_II_DEVICE) {
5734 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5735 vpd_addr = 0x80;
5736 }
5737 else {
5738 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5739 vpd_addr = 0x50;
5740 }
19a60522 5741 strcpy(nic->serial_num, "NOT AVAILABLE");
9dc737a7 5742
b41477f3 5743 vpd_data = kmalloc(256, GFP_KERNEL);
c53d4945
SH
5744 if (!vpd_data) {
5745 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
b41477f3 5746 return;
c53d4945 5747 }
491976b2 5748 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
b41477f3 5749
9dc737a7
AR
5750 for (i = 0; i < 256; i +=4 ) {
5751 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5752 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5753 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5754 for (cnt = 0; cnt <5; cnt++) {
5755 msleep(2);
5756 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5757 if (data == 0x80)
5758 break;
5759 }
5760 if (cnt >= 5) {
5761 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5762 fail = 1;
5763 break;
5764 }
5765 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5766 (u32 *)&vpd_data[i]);
5767 }
19a60522
SS
5768
5769 if(!fail) {
5770 /* read serial number of adapter */
5771 for (cnt = 0; cnt < 256; cnt++) {
5772 if ((vpd_data[cnt] == 'S') &&
5773 (vpd_data[cnt+1] == 'N') &&
5774 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5775 memset(nic->serial_num, 0, VPD_STRING_LEN);
5776 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5777 vpd_data[cnt+2]);
5778 break;
5779 }
5780 }
5781 }
5782
5783 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
9dc737a7
AR
5784 memset(nic->product_name, 0, vpd_data[1]);
5785 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5786 }
b41477f3 5787 kfree(vpd_data);
491976b2 5788 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
9dc737a7
AR
5789}
5790
1da177e4
LT
5791/**
5792 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5793 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 5794 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5795 * containing all relevant information.
5796 * @data_buf : user defined value to be written into Eeprom.
5797 * Description: Reads the values stored in the Eeprom at given offset
5798 * for a given length. Stores these values int the input argument data
5799 * buffer 'data_buf' and returns these to the caller (ethtool.)
5800 * Return value:
5801 * int 0 on success
5802 */
5803
5804static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 5805 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5806{
ad4ebed0 5807 u32 i, valid;
5808 u64 data;
1ee6dd77 5809 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5810
5811 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5812
5813 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5814 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5815
5816 for (i = 0; i < eeprom->len; i += 4) {
5817 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5818 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5819 return -EFAULT;
5820 }
5821 valid = INV(data);
5822 memcpy((data_buf + i), &valid, 4);
5823 }
5824 return 0;
5825}
5826
5827/**
5828 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5829 * @sp : private member of the device structure, which is a pointer to the
5830 * s2io_nic structure.
20346722 5831 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5832 * containing all relevant information.
5833 * @data_buf ; user defined value to be written into Eeprom.
5834 * Description:
5835 * Tries to write the user provided value in the Eeprom, at the offset
5836 * given by the user.
5837 * Return value:
5838 * 0 on success, -EFAULT on failure.
5839 */
5840
5841static int s2io_ethtool_seeprom(struct net_device *dev,
5842 struct ethtool_eeprom *eeprom,
5843 u8 * data_buf)
5844{
5845 int len = eeprom->len, cnt = 0;
ad4ebed0 5846 u64 valid = 0, data;
1ee6dd77 5847 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5848
5849 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5850 DBG_PRINT(ERR_DBG,
5851 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5852 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5853 eeprom->magic);
5854 return -EFAULT;
5855 }
5856
5857 while (len) {
5858 data = (u32) data_buf[cnt] & 0x000000FF;
5859 if (data) {
5860 valid = (u32) (data << 24);
5861 } else
5862 valid = data;
5863
5864 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5865 DBG_PRINT(ERR_DBG,
5866 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5867 DBG_PRINT(ERR_DBG,
5868 "write into the specified offset\n");
5869 return -EFAULT;
5870 }
5871 cnt++;
5872 len--;
5873 }
5874
5875 return 0;
5876}
5877
5878/**
20346722
K
5879 * s2io_register_test - reads and writes into all clock domains.
5880 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5881 * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted
 * by the driver.
5884 * Description:
5885 * Read and write into all clock domains. The NIC has 3 clock domains,
5886 * see that registers in all the three regions are accessible.
5887 * Return value:
5888 * 0 on success.
5889 */
5890
1ee6dd77 5891static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5892{
1ee6dd77 5893 struct XENA_dev_config __iomem *bar0 = sp->bar0;
ad4ebed0 5894 u64 val64 = 0, exp_val;
1da177e4
LT
5895 int fail = 0;
5896
20346722
K
5897 val64 = readq(&bar0->pif_rd_swapper_fb);
5898 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
5899 fail = 1;
5900 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5901 }
5902
5903 val64 = readq(&bar0->rmac_pause_cfg);
5904 if (val64 != 0xc000ffff00000000ULL) {
5905 fail = 1;
5906 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5907 }
5908
5909 val64 = readq(&bar0->rx_queue_cfg);
ad4ebed0 5910 if (sp->device_type == XFRAME_II_DEVICE)
5911 exp_val = 0x0404040404040404ULL;
5912 else
5913 exp_val = 0x0808080808080808ULL;
5914 if (val64 != exp_val) {
1da177e4
LT
5915 fail = 1;
5916 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5917 }
5918
5919 val64 = readq(&bar0->xgxs_efifo_cfg);
5920 if (val64 != 0x000000001923141EULL) {
5921 fail = 1;
5922 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5923 }
5924
5925 val64 = 0x5A5A5A5A5A5A5A5AULL;
5926 writeq(val64, &bar0->xmsi_data);
5927 val64 = readq(&bar0->xmsi_data);
5928 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5929 fail = 1;
5930 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5931 }
5932
5933 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5934 writeq(val64, &bar0->xmsi_data);
5935 val64 = readq(&bar0->xmsi_data);
5936 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5937 fail = 1;
5938 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5939 }
5940
5941 *data = fail;
ad4ebed0 5942 return fail;
1da177e4
LT
5943}
5944
5945/**
20346722 5946 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
5947 * @sp : private member of the device structure, which is a pointer to the
5948 * s2io_nic structure.
5949 * @data:variable that returns the result of each of the test conducted by
5950 * the driver.
5951 * Description:
20346722 5952 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
5953 * register.
5954 * Return value:
5955 * 0 on success.
5956 */
5957
static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	/* Negative test: writes to protected offsets must fail, so a
	 * SUCCESSFUL write (!write_eeprom) marks the test as failed. */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	/* Positive test: the pattern just written must read back. */
	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	/* Remaining negative tests on protected Xframe I offsets. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6045
6046/**
6047 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6048 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6049 * s2io_nic structure.
20346722 6050 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6051 * the driver.
6052 * Description:
6053 * This invokes the MemBist test of the card. We give around
6054 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
1da177e4
LT
6056 * Return value:
6057 * 0 on success and -1 on failure.
6058 */
6059
1ee6dd77 6060static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
6061{
6062 u8 bist = 0;
6063 int cnt = 0, ret = -1;
6064
6065 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6066 bist |= PCI_BIST_START;
6067 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6068
6069 while (cnt < 20) {
6070 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6071 if (!(bist & PCI_BIST_START)) {
6072 *data = (bist & PCI_BIST_CODE_MASK);
6073 ret = 0;
6074 break;
6075 }
6076 msleep(100);
6077 cnt++;
6078 }
6079
6080 return ret;
6081}
6082
6083/**
20346722
K
 * s2io_link_test - verifies the link state of the nic
6085 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
6086 * s2io_nic structure.
6087 * @data: variable that returns the result of each of the test conducted by
6088 * the driver.
6089 * Description:
20346722 6090 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6091 * argument 'data' appropriately.
6092 * Return value:
6093 * 0 on success.
6094 */
6095
1ee6dd77 6096static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 6097{
1ee6dd77 6098 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6099 u64 val64;
6100
6101 val64 = readq(&bar0->adapter_status);
c92ca04b 6102 if(!(LINK_IS_UP(val64)))
1da177e4 6103 *data = 1;
c92ca04b
AR
6104 else
6105 *data = 0;
1da177e4 6106
b41477f3 6107 return *data;
1da177e4
LT
6108}
6109
6110/**
20346722
K
6111 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6112 * @sp - private member of the device structure, which is a pointer to the
1da177e4 6113 * s2io_nic structure.
20346722 6114 * @data - variable that returns the result of each of the test
1da177e4
LT
6115 * conducted by the driver.
6116 * Description:
20346722 6117 * This is one of the offline test that tests the read and write
1da177e4
LT
6118 * access to the RldRam chip on the NIC.
6119 * Return value:
6120 * 0 on success.
6121 */
6122
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the test patterns are not silently corrected. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the RLDRAM controller into test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: pass 1 repeats pass 0 with the data patterns
	 * bit-inverted (upper 48 bits flipped). */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Fixed test address within RLDRAM. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll for completion
		 * (up to 5 x 200 ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;

		/* Kick off the read/compare phase and poll again
		 * (up to 5 x 500 ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* The hardware sets PASS when read-back matched. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6207
6208/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6210 * @sp : private member of the device structure, which is a pointer to the
6211 * s2io_nic structure.
6212 * @ethtest : pointer to a ethtool command specific structure that will be
6213 * returned to the user.
20346722 6214 * @data : variable that returns the result of each of the test
1da177e4
LT
6215 * conducted by the driver.
6216 * Description:
6217 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6218 * the health of the card.
6219 * Return value:
6220 * void
6221 */
6222
6223static void s2io_ethtool_test(struct net_device *dev,
6224 struct ethtool_test *ethtest,
6225 uint64_t * data)
6226{
1ee6dd77 6227 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6228 int orig_state = netif_running(sp->dev);
6229
6230 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6231 /* Offline Tests. */
20346722 6232 if (orig_state)
1da177e4 6233 s2io_close(sp->dev);
1da177e4
LT
6234
6235 if (s2io_register_test(sp, &data[0]))
6236 ethtest->flags |= ETH_TEST_FL_FAILED;
6237
6238 s2io_reset(sp);
1da177e4
LT
6239
6240 if (s2io_rldram_test(sp, &data[3]))
6241 ethtest->flags |= ETH_TEST_FL_FAILED;
6242
6243 s2io_reset(sp);
1da177e4
LT
6244
6245 if (s2io_eeprom_test(sp, &data[1]))
6246 ethtest->flags |= ETH_TEST_FL_FAILED;
6247
6248 if (s2io_bist_test(sp, &data[4]))
6249 ethtest->flags |= ETH_TEST_FL_FAILED;
6250
6251 if (orig_state)
6252 s2io_open(sp->dev);
6253
6254 data[2] = 0;
6255 } else {
6256 /* Online Tests. */
6257 if (!orig_state) {
6258 DBG_PRINT(ERR_DBG,
6259 "%s: is not up, cannot run test\n",
6260 dev->name);
6261 data[0] = -1;
6262 data[1] = -1;
6263 data[2] = -1;
6264 data[3] = -1;
6265 data[4] = -1;
6266 }
6267
6268 if (s2io_link_test(sp, &data[2]))
6269 ethtest->flags |= ETH_TEST_FL_FAILED;
6270
6271 data[0] = 0;
6272 data[1] = 0;
6273 data[3] = 0;
6274 data[4] = 0;
6275 }
6276}
6277
6278static void s2io_get_ethtool_stats(struct net_device *dev,
6279 struct ethtool_stats *estats,
6280 u64 * tmp_stats)
6281{
8116f3cf 6282 int i = 0, k;
1ee6dd77
RB
6283 struct s2io_nic *sp = dev->priv;
6284 struct stat_block *stat_info = sp->mac_control.stats_info;
1da177e4 6285
7ba013ac 6286 s2io_updt_stats(sp);
541ae68f
K
6287 tmp_stats[i++] =
6288 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6289 le32_to_cpu(stat_info->tmac_frms);
6290 tmp_stats[i++] =
6291 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6292 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 6293 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f
K
6294 tmp_stats[i++] =
6295 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6296 le32_to_cpu(stat_info->tmac_mcst_frms);
6297 tmp_stats[i++] =
6298 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6299 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 6300 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
bd1034f0
AR
6301 tmp_stats[i++] =
6302 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6303 le32_to_cpu(stat_info->tmac_ttl_octets);
6304 tmp_stats[i++] =
6305 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6306 le32_to_cpu(stat_info->tmac_ucst_frms);
6307 tmp_stats[i++] =
6308 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6309 le32_to_cpu(stat_info->tmac_nucst_frms);
541ae68f
K
6310 tmp_stats[i++] =
6311 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6312 le32_to_cpu(stat_info->tmac_any_err_frms);
bd1034f0 6313 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
1da177e4 6314 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f
K
6315 tmp_stats[i++] =
6316 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6317 le32_to_cpu(stat_info->tmac_vld_ip);
6318 tmp_stats[i++] =
6319 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6320 le32_to_cpu(stat_info->tmac_drop_ip);
6321 tmp_stats[i++] =
6322 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6323 le32_to_cpu(stat_info->tmac_icmp);
6324 tmp_stats[i++] =
6325 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6326 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 6327 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f
K
6328 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6329 le32_to_cpu(stat_info->tmac_udp);
6330 tmp_stats[i++] =
6331 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6332 le32_to_cpu(stat_info->rmac_vld_frms);
6333 tmp_stats[i++] =
6334 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6335 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
6336 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6337 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f
K
6338 tmp_stats[i++] =
6339 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6340 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6341 tmp_stats[i++] =
6342 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6343 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4 6344 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
bd1034f0 6345 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
1da177e4
LT
6346 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6347 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
bd1034f0
AR
6348 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6349 tmp_stats[i++] =
6350 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6351 le32_to_cpu(stat_info->rmac_ttl_octets);
6352 tmp_stats[i++] =
6353 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6354 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6355 tmp_stats[i++] =
6356 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6357 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
541ae68f
K
6358 tmp_stats[i++] =
6359 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6360 le32_to_cpu(stat_info->rmac_discarded_frms);
bd1034f0
AR
6361 tmp_stats[i++] =
6362 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6363 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6364 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6365 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
541ae68f
K
6366 tmp_stats[i++] =
6367 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6368 le32_to_cpu(stat_info->rmac_usized_frms);
6369 tmp_stats[i++] =
6370 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6371 le32_to_cpu(stat_info->rmac_osized_frms);
6372 tmp_stats[i++] =
6373 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6374 le32_to_cpu(stat_info->rmac_frag_frms);
6375 tmp_stats[i++] =
6376 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6377 le32_to_cpu(stat_info->rmac_jabber_frms);
bd1034f0
AR
6378 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6379 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6380 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6381 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6382 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6383 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6384 tmp_stats[i++] =
6385 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
541ae68f 6386 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
6387 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6388 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
bd1034f0
AR
6389 tmp_stats[i++] =
6390 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
541ae68f 6391 le32_to_cpu(stat_info->rmac_drop_ip);
bd1034f0
AR
6392 tmp_stats[i++] =
6393 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
541ae68f 6394 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 6395 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
bd1034f0
AR
6396 tmp_stats[i++] =
6397 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
541ae68f
K
6398 le32_to_cpu(stat_info->rmac_udp);
6399 tmp_stats[i++] =
6400 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6401 le32_to_cpu(stat_info->rmac_err_drp_udp);
bd1034f0
AR
6402 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6403 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6404 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6405 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6406 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6407 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6408 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6409 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6410 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6411 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6412 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6413 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6414 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6415 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6416 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6417 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6418 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
541ae68f
K
6419 tmp_stats[i++] =
6420 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6421 le32_to_cpu(stat_info->rmac_pause_cnt);
bd1034f0
AR
6422 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6423 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
541ae68f
K
6424 tmp_stats[i++] =
6425 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6426 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 6427 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
bd1034f0
AR
6428 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6429 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6430 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6431 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6432 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6433 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6434 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6435 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6436 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6437 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6438 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6439 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6440 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6441 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
fa1f0cb3
SS
6446
6447 /* Enhanced statistics exist only for Hercules */
6448 if(sp->device_type == XFRAME_II_DEVICE) {
6449 tmp_stats[i++] =
6450 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6451 tmp_stats[i++] =
6452 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6453 tmp_stats[i++] =
6454 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6455 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6456 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6457 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6458 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6459 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6460 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6461 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6462 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6463 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6464 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6465 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6466 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6467 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6468 }
6469
7ba013ac
K
6470 tmp_stats[i++] = 0;
6471 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6472 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
bd1034f0
AR
6473 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6474 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6475 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6476 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
8116f3cf
SS
6477 for (k = 0; k < MAX_RX_RINGS; k++)
6478 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
bd1034f0
AR
6479 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6480 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6481 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6482 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6483 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6484 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6485 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6486 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6487 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6488 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6489 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6490 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
7d3d0439
RA
6491 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6492 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6493 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6494 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
fe931395 6495 if (stat_info->sw_stat.num_aggregations) {
bd1034f0
AR
6496 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6497 int count = 0;
6aa20a22 6498 /*
bd1034f0
AR
6499 * Since 64-bit divide does not work on all platforms,
6500 * do repeated subtraction.
6501 */
6502 while (tmp >= stat_info->sw_stat.num_aggregations) {
6503 tmp -= stat_info->sw_stat.num_aggregations;
6504 count++;
6505 }
6506 tmp_stats[i++] = count;
fe931395 6507 }
bd1034f0
AR
6508 else
6509 tmp_stats[i++] = 0;
c53d4945 6510 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
491abf25 6511 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
c53d4945 6512 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
491976b2
SH
6513 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6514 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6515 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6516 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6517 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6518 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6519
6520 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6521 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6522 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6523 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6524 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6525
6526 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6527 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6528 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6529 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6530 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6531 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6532 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6533 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6534 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
8116f3cf
SS
6535 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6536 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6537 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6538 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6539 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6542 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6544 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6545 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
1da177e4
LT
6552}
6553
ac1f60db 6554static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
6555{
6556 return (XENA_REG_SPACE);
6557}
6558
6559
ac1f60db 6560static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4 6561{
1ee6dd77 6562 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6563
6564 return (sp->rx_csum);
6565}
ac1f60db
AB
6566
6567static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 6568{
1ee6dd77 6569 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6570
6571 if (data)
6572 sp->rx_csum = 1;
6573 else
6574 sp->rx_csum = 0;
6575
6576 return 0;
6577}
ac1f60db
AB
6578
6579static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
6580{
6581 return (XENA_EEPROM_SPACE);
6582}
6583
b9f2c044 6584static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6585{
b9f2c044
JG
6586 struct s2io_nic *sp = dev->priv;
6587
6588 switch (sset) {
6589 case ETH_SS_TEST:
6590 return S2IO_TEST_LEN;
6591 case ETH_SS_STATS:
6592 switch(sp->device_type) {
6593 case XFRAME_I_DEVICE:
6594 return XFRAME_I_STAT_LEN;
6595 case XFRAME_II_DEVICE:
6596 return XFRAME_II_STAT_LEN;
6597 default:
6598 return 0;
6599 }
6600 default:
6601 return -EOPNOTSUPP;
6602 }
1da177e4 6603}
/**
 * s2io_ethtool_get_strings - copy the requested ethtool string table.
 * @dev: device pointer.
 * @stringset: ETH_SS_TEST or ETH_SS_STATS.
 * @data: destination buffer, sized per s2io_get_sset_count().
 *
 * For ETH_SS_STATS the layout MUST match the order in which
 * s2io_get_ethtool_stats() fills tmp_stats: Xena hardware stats first,
 * then (XframeII only) the enhanced stats, then the driver software stats.
 */
static void s2io_ethtool_get_strings(struct net_device *dev,
				     u32 stringset, u8 * data)
{
	int stat_size = 0;
	struct s2io_nic *sp = dev->priv;

	switch (stringset) {
	case ETH_SS_TEST:
		/* Self-test result names */
		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
		break;
	case ETH_SS_STATS:
		/* Common Xena hardware statistics keys come first */
		stat_size = sizeof(ethtool_xena_stats_keys);
		memcpy(data, &ethtool_xena_stats_keys,stat_size);
		/* Enhanced statistics exist only for Hercules (XframeII) */
		if(sp->device_type == XFRAME_II_DEVICE) {
			memcpy(data + stat_size,
				&ethtool_enhanced_stats_keys,
				sizeof(ethtool_enhanced_stats_keys));
			stat_size += sizeof(ethtool_enhanced_stats_keys);
		}

		/* Driver (software) statistics keys are appended last */
		memcpy(data + stat_size, &ethtool_driver_stats_keys,
			sizeof(ethtool_driver_stats_keys));
	}
}
1da177e4 6629
ac1f60db 6630static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
6631{
6632 if (data)
6633 dev->features |= NETIF_F_IP_CSUM;
6634 else
6635 dev->features &= ~NETIF_F_IP_CSUM;
6636
6637 return 0;
6638}
6639
75c30b13
AR
6640static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6641{
6642 return (dev->features & NETIF_F_TSO) != 0;
6643}
6644static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6645{
6646 if (data)
6647 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6648 else
6649 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6650
6651 return 0;
6652}
1da177e4 6653
/*
 * ethtool operations table, installed on the net_device at probe time.
 * Entries not listed (e.g. get_tx_csum) fall back to the ethtool core's
 * defaults. get_link uses the generic helper; everything else is
 * driver-specific.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6680
/**
 * s2io_ioctl - Entry point for the Ioctl
 * @dev : Device pointer.
 * @rq : An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd : This is used to distinguish between the different commands that
 * can be passed to the IOCTL functions.
 * Description:
 * Currently there is no special functionality supported in IOCTL, hence
 * this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
6697
6698/**
6699 * s2io_change_mtu - entry point to change MTU size for the device.
6700 * @dev : device pointer.
6701 * @new_mtu : the new MTU size for the device.
6702 * Description: A driver entry point to change MTU size for the device.
6703 * Before changing the MTU the device must be stopped.
6704 * Return value:
6705 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6706 * file on failure.
6707 */
6708
ac1f60db 6709static int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 6710{
1ee6dd77 6711 struct s2io_nic *sp = dev->priv;
9f74ffde 6712 int ret = 0;
1da177e4
LT
6713
6714 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6715 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6716 dev->name);
6717 return -EPERM;
6718 }
6719
1da177e4 6720 dev->mtu = new_mtu;
d8892c6e 6721 if (netif_running(dev)) {
3a3d5756 6722 s2io_stop_all_tx_queue(sp);
e6a8fee2 6723 s2io_card_down(sp);
9f74ffde
SH
6724 ret = s2io_card_up(sp);
6725 if (ret) {
d8892c6e
K
6726 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6727 __FUNCTION__);
9f74ffde 6728 return ret;
d8892c6e 6729 }
3a3d5756 6730 s2io_wake_all_tx_queue(sp);
d8892c6e 6731 } else { /* Device is down */
1ee6dd77 6732 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d8892c6e
K
6733 u64 val64 = new_mtu;
6734
6735 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6736 }
1da177e4 6737
9f74ffde 6738 return ret;
1da177e4
LT
6739}
6740
/**
 * s2io_set_link - Set the Link status
 * @work: work struct embedded in the per-device private structure
 * Description: Sets the link status for the adapter. Runs from the
 * set_link_task workqueue; takes the RTNL lock and is mutually
 * exclusive with card reset via the __S2IO_STATE_LINK_TASK bit.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* Nothing to do if the interface was brought down meanwhile */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter if not already enabled */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Some boards drive the link LED via GPIO
				 * instead of the adapter-control LED bit */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read-back flushes the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				s2io_stop_all_tx_queue(nic);
			}
		}
		/* Turn the LED on and report link-up */
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6821
/*
 * set_rxd_buffer_pointer - (re)attach DMA-mapped buffers to an Rx descriptor.
 * @sp: device private structure.
 * @rxdp: the descriptor to fill.
 * @ba: buffer-address bookkeeping for 3-buffer mode (unused in 1-buf mode).
 * @skb: in/out; if *skb is non-NULL the previously mapped addresses in
 *       temp0..temp2 are reused, otherwise a new skb is allocated and mapped.
 * @temp0, @temp1, @temp2: cached DMA addresses shared across descriptors
 *       whose Host_Control is NULL (frames will not be processed, so the
 *       same mapping can back several descriptors).
 * @size: skb allocation size for the current ring mode.
 *
 * Returns 0 on success, -ENOMEM on allocation or DMA-mapping failure;
 * on mapping failure all mappings made so far are undone and the skb freed.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse mappings cached from a previous descriptor */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* Buffer-2 holds the payload (MTU + 4) */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer-0 holds the Ethernet header */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
				/* undo the Buffer2 mapping before bailing */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
				/* undo both earlier mappings */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	memalloc_failed:
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
491abf25 6923
1ee6dd77
RB
6924static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6925 int size)
5d3213cc
AR
6926{
6927 struct net_device *dev = sp->dev;
6928 if (sp->rxd_mode == RXD_MODE_1) {
6929 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6930 } else if (sp->rxd_mode == RXD_MODE_3B) {
6931 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6932 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6933 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
5d3213cc
AR
6934 }
6935}
6936
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 * Used during card-down to replenish the rings (so the HW does not bump
 * the ring) without any intention of processing the received frames:
 * buffers are (re)attached, sizes programmed, and the ownership bit
 * flipped to XENA for each descriptor.
 *
 * Always returns 0; an -ENOMEM from set_rxd_buffer_pointer() aborts the
 * walk early but is still reported as 0 (best-effort replenish).
 */
static int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct mac_info * mac_control = &sp->mac_control;
	struct config_param *config = &sp->config;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[sp->rxd_mode] +1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = mac_control->rings[i].
					rx_blocks[j].rxds[k].virt_addr;
				if(sp->rxd_mode == RXD_MODE_3B)
					ba = &mac_control->rings[i].ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba,
						&skb,(u64 *)&temp0_64,
						(u64 *)&temp1_64,
						(u64 *)&temp2_64,
						size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* ensure buffer writes are visible before
				 * handing ownership to the NIC */
				wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6984
/*
 * s2io_add_isr - enable MSI-X (if configured) and register interrupt handlers.
 * Tries MSI-X first; on any failure falls back to legacy INTA. For MSI-X,
 * one vector is registered per in-use table entry: ring vectors get the
 * Rx handler, the alarm vector gets the fifo/alarm handler.
 *
 * Returns 0 on success, -1 if the INTA request_irq() fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed: silently fall back to INTA */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
					MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_ring_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					MSIX_ALARM_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_fifo_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						"%s @Addr:0x%llx Data:0x%llx\n",
						sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long long)
						ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Tear down every vector registered
					 * so far, then fall back to INTA */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						"%s:MSI-X-%d registration "
						"failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						"%s: Defaulting to INTA\n",
						dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				--msix_rx_cnt);
			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
				" through alarm vector\n");
		}
	}
	/* Note: reached both when INTA was configured originally and when
	 * MSI-X registration fell back to INTA above */
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
			sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				dev->name);
			return -1;
		}
	}
	return 0;
}
1ee6dd77 7072static void s2io_rem_isr(struct s2io_nic * sp)
e6a8fee2 7073{
18b2b7bd
SH
7074 if (sp->config.intr_type == MSI_X)
7075 remove_msix_isr(sp);
7076 else
7077 remove_inta_isr(sp);
e6a8fee2
AR
7078}
7079
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: device private structure.
 * @do_io: non-zero to touch the hardware (stop NIC, wait for quiescence,
 *         reset); zero to do software-only teardown (e.g. after the device
 *         has been removed/surprise-detached).
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	/* Already down: nothing to do */
	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type == MSI_X) {
			/* per-ring napi contexts in MSI-X mode */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we are
		 * just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		/* give up after ~500ms of polling */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				"s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				(unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7153
/* Bring the card down with full hardware I/O (stop, quiesce, reset). */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
7158
/*
 * s2io_card_up - bring the adapter up.
 * Initializes the hardware registers, fills the Rx rings, enables napi,
 * restores receive mode, starts the NIC, registers interrupt handlers,
 * arms the alarm timer and enables interrupts.
 *
 * Returns 0 on success or a negative errno; on any failure after rings
 * were filled, the card is reset and the Rx buffers freed again.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the HW is gone; don't touch it further */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].mtu = dev->mtu;
		ret = fill_rx_buffers(&mac_control->rings[i]);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  mac_control->rings[i].rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		int i;
		if (config->intr_type == MSI_X) {
			/* one napi context per Rx ring in MSI-X mode */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the alarm timer to fire every half second */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
7260
20346722 7261/**
1da177e4
LT
7262 * s2io_restart_nic - Resets the NIC.
7263 * @data : long pointer to the device private structure
7264 * Description:
7265 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 7266 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
7267 * the run time of the watch dog routine which is run holding a
7268 * spin lock.
7269 */
7270
c4028958 7271static void s2io_restart_nic(struct work_struct *work)
1da177e4 7272{
1ee6dd77 7273 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 7274 struct net_device *dev = sp->dev;
1da177e4 7275
22747d6b
FR
7276 rtnl_lock();
7277
7278 if (!netif_running(dev))
7279 goto out_unlock;
7280
e6a8fee2 7281 s2io_card_down(sp);
1da177e4
LT
7282 if (s2io_card_up(sp)) {
7283 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7284 dev->name);
7285 }
3a3d5756 7286 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7287 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7288 dev->name);
22747d6b
FR
7289out_unlock:
7290 rtnl_unlock();
1da177e4
LT
7291}
7292
20346722
K
7293/**
7294 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
7295 * @dev : Pointer to net device structure
7296 * Description:
7297 * This function is triggered if the Tx Queue is stopped
7298 * for a pre-defined amount of time when the Interface is still up.
7299 * If the Interface is jammed in such a situation, the hardware is
7300 * reset (by s2io_close) and restarted again (by s2io_open) to
7301 * overcome any problem that might have been caused in the hardware.
7302 * Return value:
7303 * void
7304 */
7305
7306static void s2io_tx_watchdog(struct net_device *dev)
7307{
1ee6dd77 7308 struct s2io_nic *sp = dev->priv;
1da177e4
LT
7309
7310 if (netif_carrier_ok(dev)) {
c53d4945 7311 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
1da177e4 7312 schedule_work(&sp->rst_timer_task);
bd1034f0 7313 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
1da177e4
LT
7314 }
7315}
7316
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data : per-ring private data, pointer to the ring_info structure.
 * @rxdp : pointer to the RxD descriptor from which this packet was extracted.
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) ring_data->dev;
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	/* T_CODE field of Control_1: non-zero means the hardware flagged
	 * an error on this descriptor. */
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* Transfer code lives in the upper bits of the T_CODE field;
		 * bucket it into the per-cause software counters. */
		err_mask = err >> 48;
		switch(err_mask) {
		case 1:
			sp->mac_control.stats_info->sw_stat.rx_parity_err_cnt++;
			break;

		case 2:
			sp->mac_control.stats_info->sw_stat.rx_abort_cnt++;
			break;

		case 3:
			sp->mac_control.stats_info->sw_stat.rx_parity_abort_cnt++;
			break;

		case 4:
			sp->mac_control.stats_info->sw_stat.rx_rda_fail_cnt++;
			break;

		case 5:
			sp->mac_control.stats_info->sw_stat.rx_unkn_prot_cnt++;
			break;

		case 6:
			sp->mac_control.stats_info->sw_stat.rx_fcs_err_cnt++;
			break;

		case 7:
			sp->mac_control.stats_info->sw_stat.rx_buf_size_err_cnt++;
			break;

		case 8:
			sp->mac_control.stats_info->sw_stat.rx_rxd_corrupt_cnt++;
			break;

		case 15:
			sp->mac_control.stats_info->sw_stat.rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	ring_data->rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		ring_data->rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 2-buffer mode: buffer 0 holds the header (copied in front
		 * of the skb data), buffer 2 holds the payload. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		ring_data->rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Hardware checksum is only trusted for unfragmented TCP/UDP frames
	 * (or when LRO is off); otherwise fall through to CHECKSUM_NONE. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
	    (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				ret = s2io_club_tcp_session(ring_data,
					skb->data, &tcp, &tcp_len, &lro,
					rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
				{
					lro_append_pkt(sp, lro,
						skb, tcp_len);
					goto aggregate;
				}
				case 4: /* Flush session */
				{
					lro_append_pkt(sp, lro,
						skb, tcp_len);
					queue_rx_frame(lro->parent,
						lro->vlan_tag);
					clear_lro_session(lro);
					sp->mac_control.stats_info->
						sw_stat.flush_max_pkts++;
					goto aggregate;
				}
				case 2: /* Flush both */
					lro->parent->data_len =
						lro->frags_len;
					sp->mac_control.stats_info->
						sw_stat.sending_both++;
					queue_rx_frame(lro->parent,
						lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not
					  * L2 aggregatable
					  */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						"%s: Samadhana!!\n",
						 __FUNCTION__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else
		skb->ip_summed = CHECKSUM_NONE;

	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
send_up:
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
	dev->last_rx = jiffies;
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7526
/**
 * s2io_link - stops/starts the Tx queue.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @link : indicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
 * Return value:
 * void.
 */
7539
1ee6dd77 7540static void s2io_link(struct s2io_nic * sp, int link)
1da177e4
LT
7541{
7542 struct net_device *dev = (struct net_device *) sp->dev;
7543
7544 if (link != sp->last_link_state) {
b7c5678f 7545 init_tti(sp, link);
1da177e4
LT
7546 if (link == LINK_DOWN) {
7547 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
3a3d5756 7548 s2io_stop_all_tx_queue(sp);
1da177e4 7549 netif_carrier_off(dev);
491976b2 7550 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
8a4bdbaa 7551 sp->mac_control.stats_info->sw_stat.link_up_time =
491976b2
SH
7552 jiffies - sp->start_time;
7553 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
1da177e4
LT
7554 } else {
7555 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
491976b2 7556 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
8a4bdbaa 7557 sp->mac_control.stats_info->sw_stat.link_down_time =
491976b2
SH
7558 jiffies - sp->start_time;
7559 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
1da177e4 7560 netif_carrier_on(dev);
3a3d5756 7561 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7562 }
7563 }
7564 sp->last_link_state = link;
491976b2 7565 sp->start_time = jiffies;
1da177e4
LT
7566}
7567
20346722
K
7568/**
7569 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7570 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7571 * s2io_nic structure.
7572 * Description:
7573 * This function initializes a few of the PCI and PCI-X configuration registers
7574 * with recommended values.
7575 * Return value:
7576 * void
7577 */
7578
1ee6dd77 7579static void s2io_init_pci(struct s2io_nic * sp)
1da177e4 7580{
20346722 7581 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7582
7583 /* Enable Data Parity Error Recovery in PCI-X command register. */
7584 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7585 &(pcix_cmd));
1da177e4 7586 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7587 (pcix_cmd | 1));
1da177e4 7588 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7589 &(pcix_cmd));
1da177e4
LT
7590
7591 /* Set the PErr Response bit in PCI command register. */
7592 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7593 pci_write_config_word(sp->pdev, PCI_COMMAND,
7594 (pci_cmd | PCI_COMMAND_PARITY));
7595 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7596}
7597
3a3d5756
SH
7598static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7599 u8 *dev_multiq)
9dc737a7 7600{
2fda096d 7601 if ((tx_fifo_num > MAX_TX_FIFOS) ||
6cfc482b 7602 (tx_fifo_num < 1)) {
2fda096d
SR
7603 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7604 "(%d) not supported\n", tx_fifo_num);
6cfc482b
SH
7605
7606 if (tx_fifo_num < 1)
7607 tx_fifo_num = 1;
7608 else
7609 tx_fifo_num = MAX_TX_FIFOS;
7610
2fda096d
SR
7611 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7612 DBG_PRINT(ERR_DBG, "tx fifos\n");
9dc737a7 7613 }
2fda096d 7614
6cfc482b 7615 if (multiq)
3a3d5756 7616 *dev_multiq = multiq;
6cfc482b
SH
7617
7618 if (tx_steering_type && (1 == tx_fifo_num)) {
7619 if (tx_steering_type != TX_DEFAULT_STEERING)
7620 DBG_PRINT(ERR_DBG,
7621 "s2io: Tx steering is not supported with "
7622 "one fifo. Disabling Tx steering.\n");
7623 tx_steering_type = NO_STEERING;
7624 }
7625
7626 if ((tx_steering_type < NO_STEERING) ||
7627 (tx_steering_type > TX_DEFAULT_STEERING)) {
7628 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7629 "supported\n");
7630 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7631 tx_steering_type = NO_STEERING;
3a3d5756
SH
7632 }
7633
0425b46a
SH
7634 if (rx_ring_num > MAX_RX_RINGS) {
7635 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
9dc737a7 7636 "supported\n");
0425b46a
SH
7637 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7638 MAX_RX_RINGS);
7639 rx_ring_num = MAX_RX_RINGS;
9dc737a7 7640 }
0425b46a 7641
eccb8628 7642 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
9dc737a7
AR
7643 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7644 "Defaulting to INTA\n");
7645 *dev_intr_type = INTA;
7646 }
596c5c97 7647
9dc737a7
AR
7648 if ((*dev_intr_type == MSI_X) &&
7649 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7650 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6aa20a22 7651 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
9dc737a7
AR
7652 "Defaulting to INTA\n");
7653 *dev_intr_type = INTA;
7654 }
fb6a825b 7655
6d517a27 7656 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
9dc737a7 7657 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6d517a27
VP
7658 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7659 rx_ring_mode = 1;
9dc737a7
AR
7660 }
7661 return SUCCESS;
7662}
7663
9fc93a41
SS
7664/**
7665 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7666 * or Traffic class respectively.
b7c5678f 7667 * @nic: device private variable
9fc93a41
SS
7668 * Description: The function configures the receive steering to
7669 * desired receive ring.
7670 * Return Value: SUCCESS on success and
7671 * '-1' on failure (endian settings incorrect).
7672 */
7673static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7674{
7675 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7676 register u64 val64 = 0;
7677
7678 if (ds_codepoint > 63)
7679 return FAILURE;
7680
7681 val64 = RTS_DS_MEM_DATA(ring);
7682 writeq(val64, &bar0->rts_ds_mem_data);
7683
7684 val64 = RTS_DS_MEM_CTRL_WE |
7685 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7686 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7687
7688 writeq(val64, &bar0->rts_ds_mem_ctrl);
7689
7690 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7691 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7692 S2IO_BIT_RESET);
7693}
7694
/**
 * s2io_init_nic - Initialization of the adapter .
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initialization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */
7708
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;
	u8 dev_multiq = 0;
	DECLARE_MAC_BUF(mac);

	/* Validate/clamp module parameters before touching the hardware. */
	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
	if (ret)
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	/* Prefer 64-bit DMA, fall back to 32-bit; fail if neither works. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for \
				  consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}
	if (dev_multiq)
		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
	else
		dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	/* Herc (Xframe II) is identified purely by PCI device ID. */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
		(pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro_enable;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	config->napi = napi;
	config->tx_steering_type = tx_steering_type;

	/* Tx side parameters. */
	if (config->tx_steering_type == TX_PRIORITY_STEERING)
		config->tx_fifo_num = MAX_TX_FIFOS;
	else
		config->tx_fifo_num = tx_fifo_num;

	/* Initialize the fifos used for tx steering */
	if (config->tx_fifo_num < 5) {
		if (config->tx_fifo_num == 1)
			sp->total_tcp_fifos = 1;
		else
			sp->total_tcp_fifos = config->tx_fifo_num - 1;
		sp->udp_fifo_idx = config->tx_fifo_num - 1;
		sp->total_udp_fifos = 1;
		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
	} else {
		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
						FIFO_OTHER_MAX_NUM);
		sp->udp_fifo_idx = sp->total_tcp_fifos;
		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
	}

	config->multiq = dev_multiq;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

	/* map the hashing selector table to the configured fifos */
	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];


	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		/* Small FIFOs interrupt per TxD list instead of by
		 * utilization, presumably to avoid starvation —
		 * NOTE(review): confirm against hardware spec. */
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < config->rx_ring_num; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
		mac_control->rings[i].rx_bufs_left = 0;
		mac_control->rings[i].rxd_mode = sp->rxd_mode;
		mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
		mac_control->rings[i].pdev = sp->pdev;
		mac_control->rings[i].dev = sp->dev;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	/* BAR0 = control/config registers, BAR2 = Tx FIFO doorbells. */
	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
				     pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->set_mac_address = &s2io_set_mac_addr;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;
	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

	/*
	 * will use eth_mac_addr() for  dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	/* UFO + full checksum offload only on Xframe II, and only if the
	 * "ufo" module parameter asked for it. */
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}
	if (config->multiq)
		dev->features |= NETIF_F_MULTI_QUEUE;
	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Probe MSI-X once here; on any failure fall back to INTA. */
	if (sp->config.intr_type == MSI_X) {
		sp->num_entries = config->rx_ring_num + 1;
		ret = s2io_enable_msi_x(sp);

		if (!ret) {
			ret = s2io_test_msi(sp);
			/* rollback MSI-X, will re-enable during add_isr() */
			remove_msix_isr(sp);
		}
		if (ret) {

			DBG_PRINT(ERR_DBG,
			  "%s: MSI-X requested but failed to enable\n",
			  dev->name);
			sp->config.intr_type = INTA;
		}
	}

	/* One NAPI context per Rx ring under MSI-X, a single shared one
	 * otherwise. */
	if (config->intr_type == MSI_X) {
		for (i = 0; i < config->rx_ring_num ; i++)
			netif_napi_add(dev, &mac_control->rings[i].napi,
				s2io_poll_msix, 64);
	} else {
		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/*  Set the factory defined MAC address initially   */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

	/* initialize number of multicast & unicast MAC entries variables */
	if (sp->device_type == XFRAME_I_DEVICE) {
		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
	} else if (sp->device_type == XFRAME_II_DEVICE) {
		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
	}

	/* store mac addresses from CAM to s2io_nic structure */
	do_s2io_store_unicast_mc(sp);

	/* Configure MSIX vector for number of rings configured plus one */
	if ((sp->device_type == XFRAME_II_DEVICE) &&
		(config->intr_type == MSI_X))
		sp->num_entries = config->rx_ring_num + 1;

	/* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize link state flags
	 * and the card state parameter
	 */
	sp->state = 0;

	/* Initialize spinlocks */
	for (i = 0; i < sp->config.tx_fifo_num; i++)
		spin_lock_init(&mac_control->fifos[i].tx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
		  dev->name, print_mac(mac, dev->dev_addr));
	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch(sp->rxd_mode) {
	case RXD_MODE_1:
		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3B:
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
			  dev->name);
		break;
	}

	switch (sp->config.napi) {
	case 0:
		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
		break;
	case 1:
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
		break;
	}

	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
		sp->config.tx_fifo_num);

	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
		  sp->config.rx_ring_num);

	switch(sp->config.intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			mac_control->fifos[i].multiq = config->multiq;
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
			dev->name);
	} else
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			dev->name);

	switch (sp->config.tx_steering_type) {
	case NO_STEERING:
		DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
			" transmit\n", dev->name);
		break;
	case TX_PRIORITY_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
			" transmit\n", dev->name);
		break;
	case TX_DEFAULT_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
			" transmit\n", dev->name);
	}

	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
			" enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Unwind in reverse order of acquisition; note register_failed
	 * shares the set_swap_failed path (bar1 is mapped by then). */
      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
8206
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the Pci subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
		(struct net_device *) pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	/* Nothing to tear down if probe never stored driver data. */
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/* Wait for any scheduled deferred work (e.g. reset/link tasks)
	 * to finish before we start freeing the resources it may use. */
	flush_scheduled_work();

	/* Grab the private area before unregistering; teardown order:
	 * stop the netdev first, then release memory and BAR mappings,
	 * then the PCI regions, and disable the device last. */
	sp = dev->priv;
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8240
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

static int __init s2io_starter(void)
{
	/* Register with the PCI core; device setup happens in the
	 * driver's probe callback when matching hardware is found. */
	return pci_register_driver(&s2io_driver);
}
8251
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver from the PCI subsystem.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);
7d3d0439 8265
6aa20a22 8266static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
cdb5bf02
SH
8267 struct tcphdr **tcp, struct RxD_t *rxdp,
8268 struct s2io_nic *sp)
7d3d0439
RA
8269{
8270 int ip_off;
8271 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8272
8273 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8274 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8275 __FUNCTION__);
8276 return -1;
8277 }
8278
cdb5bf02
SH
8279 /* Checking for DIX type or DIX type with VLAN */
8280 if ((l2_type == 0)
8281 || (l2_type == 4)) {
8282 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8283 /*
8284 * If vlan stripping is disabled and the frame is VLAN tagged,
8285 * shift the offset by the VLAN header size bytes.
8286 */
8287 if ((!vlan_strip_flag) &&
8288 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8289 ip_off += HEADER_VLAN_SIZE;
8290 } else {
7d3d0439 8291 /* LLC, SNAP etc are considered non-mergeable */
cdb5bf02 8292 return -1;
7d3d0439
RA
8293 }
8294
8295 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8296 ip_len = (u8)((*ip)->ihl);
8297 ip_len <<= 2;
8298 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8299
8300 return 0;
8301}
8302
1ee6dd77 8303static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8304 struct tcphdr *tcp)
8305{
8306 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8307 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8308 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8309 return -1;
8310 return 0;
8311}
8312
8313static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8314{
8315 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8316}
8317
1ee6dd77 8318static void initiate_new_session(struct lro *lro, u8 *l2h,
cdb5bf02 8319 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
7d3d0439
RA
8320{
8321 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8322 lro->l2h = l2h;
8323 lro->iph = ip;
8324 lro->tcph = tcp;
8325 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
c8855953 8326 lro->tcp_ack = tcp->ack_seq;
7d3d0439
RA
8327 lro->sg_num = 1;
8328 lro->total_len = ntohs(ip->tot_len);
8329 lro->frags_len = 0;
cdb5bf02 8330 lro->vlan_tag = vlan_tag;
6aa20a22 8331 /*
7d3d0439
RA
8332 * check if we saw TCP timestamp. Other consistency checks have
8333 * already been done.
8334 */
8335 if (tcp->doff == 8) {
c8855953
SR
8336 __be32 *ptr;
8337 ptr = (__be32 *)(tcp+1);
7d3d0439 8338 lro->saw_ts = 1;
c8855953 8339 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8340 lro->cur_tsecr = *(ptr+2);
8341 }
8342 lro->in_use = 1;
8343}
8344
1ee6dd77 8345static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
8346{
8347 struct iphdr *ip = lro->iph;
8348 struct tcphdr *tcp = lro->tcph;
bd4f3ae1 8349 __sum16 nchk;
1ee6dd77 8350 struct stat_block *statinfo = sp->mac_control.stats_info;
7d3d0439
RA
8351 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8352
8353 /* Update L3 header */
8354 ip->tot_len = htons(lro->total_len);
8355 ip->check = 0;
8356 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8357 ip->check = nchk;
8358
8359 /* Update L4 header */
8360 tcp->ack_seq = lro->tcp_ack;
8361 tcp->window = lro->window;
8362
8363 /* Update tsecr field if this session has timestamps enabled */
8364 if (lro->saw_ts) {
c8855953 8365 __be32 *ptr = (__be32 *)(tcp + 1);
7d3d0439
RA
8366 *(ptr+2) = lro->cur_tsecr;
8367 }
8368
8369 /* Update counters required for calculation of
8370 * average no. of packets aggregated.
8371 */
8372 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8373 statinfo->sw_stat.num_aggregations++;
8374}
8375
1ee6dd77 8376static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8377 struct tcphdr *tcp, u32 l4_pyld)
8378{
8379 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8380 lro->total_len += l4_pyld;
8381 lro->frags_len += l4_pyld;
8382 lro->tcp_next_seq += l4_pyld;
8383 lro->sg_num++;
8384
8385 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8386 lro->tcp_ack = tcp->ack_seq;
8387 lro->window = tcp->window;
6aa20a22 8388
7d3d0439 8389 if (lro->saw_ts) {
c8855953 8390 __be32 *ptr;
7d3d0439 8391 /* Update tsecr and tsval from this packet */
c8855953
SR
8392 ptr = (__be32 *)(tcp+1);
8393 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8394 lro->cur_tsecr = *(ptr + 2);
8395 }
8396}
8397
/*
 * Decide whether a TCP segment is eligible for LRO aggregation.
 * @l_lro: existing session to check timestamp monotonicity against,
 *         or NULL when probing a packet before creating a session.
 * Returns 0 when the segment may be merged, -1 when it must be sent
 * up (or the session flushed) instead.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	/* doff == 8 means header + exactly NOP/NOP/timestamp options. */
	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		/* Skip leading NOP padding to reach the timestamp option. */
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		/* NOTE(review): a plain '>' comparison does not handle
		 * tsval wraparound (RFC 1323 PAWS uses modular
		 * comparison) — TODO confirm whether after() semantics
		 * are needed here. */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8454
/*
 * Classify a received TCP segment against the per-ring LRO session
 * table and update/create sessions accordingly.
 *
 * Return codes (consumed by the rx path):
 *   0 - no free session slot; send the packet up unmodified
 *   1 - packet aggregated into an existing session
 *   2 - flush both the session and this packet (out of order, or
 *       no longer aggregatable)
 *   3 - new session begun with this packet
 *   4 - packet aggregated and session reached max size; flush it
 *   5 - packet is not L3/L4 aggregatable; send it up, no session made
 *  <0 - not LRO-capable at L2 (from check_L2_lro_capable)
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
		      u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
		      struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	/* Locate the IP/TCP headers; bail out early on non-mergeable L2. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp, sp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else
		return ret;

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);

	/* First pass: look for an in-use session matching this socket. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* An out-of-order segment forces a flush of both
			 * the session and the packet. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
					sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free session slot for a new flow. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
					     vlan_tag);
			break;
		case 2:
			/* Finalize headers before the session is flushed. */
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				  __FUNCTION__);
			break;
	}

	return ret;
}
8553
1ee6dd77 8554static void clear_lro_session(struct lro *lro)
7d3d0439 8555{
1ee6dd77 8556 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8557
8558 memset(lro, 0, lro_struct_size);
8559}
8560
cdb5bf02 8561static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
7d3d0439
RA
8562{
8563 struct net_device *dev = skb->dev;
cdb5bf02 8564 struct s2io_nic *sp = dev->priv;
7d3d0439
RA
8565
8566 skb->protocol = eth_type_trans(skb, dev);
cdb5bf02
SH
8567 if (sp->vlgrp && vlan_tag
8568 && (vlan_strip_flag)) {
8569 /* Queueing the vlan frame to the upper layer */
8570 if (sp->config.napi)
8571 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8572 else
8573 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8574 } else {
8575 if (sp->config.napi)
8576 netif_receive_skb(skb);
8577 else
8578 netif_rx(skb);
8579 }
7d3d0439
RA
8580}
8581
1ee6dd77
RB
8582static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8583 struct sk_buff *skb,
7d3d0439
RA
8584 u32 tcp_len)
8585{
75c30b13 8586 struct sk_buff *first = lro->parent;
7d3d0439
RA
8587
8588 first->len += tcp_len;
8589 first->data_len = lro->frags_len;
8590 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
8591 if (skb_shinfo(first)->frag_list)
8592 lro->last_frag->next = skb;
7d3d0439
RA
8593 else
8594 skb_shinfo(first)->frag_list = skb;
372cc597 8595 first->truesize += skb->truesize;
75c30b13 8596 lro->last_frag = skb;
7d3d0439
RA
8597 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8598 return;
8599}
d796fdb7
LV
8600
8601/**
8602 * s2io_io_error_detected - called when PCI error is detected
8603 * @pdev: Pointer to PCI device
8453d43f 8604 * @state: The current pci connection state
d796fdb7
LV
8605 *
8606 * This function is called after a PCI bus error affecting
8607 * this device has been detected.
8608 */
8609static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8610 pci_channel_state_t state)
8611{
8612 struct net_device *netdev = pci_get_drvdata(pdev);
8613 struct s2io_nic *sp = netdev->priv;
8614
8615 netif_device_detach(netdev);
8616
8617 if (netif_running(netdev)) {
8618 /* Bring down the card, while avoiding PCI I/O */
8619 do_s2io_card_down(sp, 0);
d796fdb7
LV
8620 }
8621 pci_disable_device(pdev);
8622
8623 return PCI_ERS_RESULT_NEED_RESET;
8624}
8625
8626/**
8627 * s2io_io_slot_reset - called after the pci bus has been reset.
8628 * @pdev: Pointer to PCI device
8629 *
8630 * Restart the card from scratch, as if from a cold-boot.
8631 * At this point, the card has exprienced a hard reset,
8632 * followed by fixups by BIOS, and has its config space
8633 * set up identically to what it was at cold boot.
8634 */
8635static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8636{
8637 struct net_device *netdev = pci_get_drvdata(pdev);
8638 struct s2io_nic *sp = netdev->priv;
8639
8640 if (pci_enable_device(pdev)) {
8641 printk(KERN_ERR "s2io: "
8642 "Cannot re-enable PCI device after reset.\n");
8643 return PCI_ERS_RESULT_DISCONNECT;
8644 }
8645
8646 pci_set_master(pdev);
8647 s2io_reset(sp);
8648
8649 return PCI_ERS_RESULT_RECOVERED;
8650}
8651
8652/**
8653 * s2io_io_resume - called when traffic can start flowing again.
8654 * @pdev: Pointer to PCI device
8655 *
8656 * This callback is called when the error recovery driver tells
8657 * us that its OK to resume normal operation.
8658 */
8659static void s2io_io_resume(struct pci_dev *pdev)
8660{
8661 struct net_device *netdev = pci_get_drvdata(pdev);
8662 struct s2io_nic *sp = netdev->priv;
8663
8664 if (netif_running(netdev)) {
8665 if (s2io_card_up(sp)) {
8666 printk(KERN_ERR "s2io: "
8667 "Can't bring device back up after reset.\n");
8668 return;
8669 }
8670
8671 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8672 s2io_card_down(sp);
8673 printk(KERN_ERR "s2io: "
8674 "Can't resetore mac addr after reset.\n");
8675 return;
8676 }
8677 }
8678
8679 netif_device_attach(netdev);
8680 netif_wake_queue(netdev);
8681}