]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/s2io.c
S2IO: statistics for memory allocation failures
[net-next-2.6.git] / drivers / net / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
0c61ed5f 3 * Copyright(c) 2002-2007 Neterion Inc.
1da177e4
LT
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4
LT
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8
AR
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7
AR
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
926930b2
SS
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
1da177e4
LT
53 ************************************************************************/
54
1da177e4
LT
55#include <linux/module.h>
56#include <linux/types.h>
57#include <linux/errno.h>
58#include <linux/ioport.h>
59#include <linux/pci.h>
1e7f0bd8 60#include <linux/dma-mapping.h>
1da177e4
LT
61#include <linux/kernel.h>
62#include <linux/netdevice.h>
63#include <linux/etherdevice.h>
64#include <linux/skbuff.h>
65#include <linux/init.h>
66#include <linux/delay.h>
67#include <linux/stddef.h>
68#include <linux/ioctl.h>
69#include <linux/timex.h>
1da177e4 70#include <linux/ethtool.h>
1da177e4 71#include <linux/workqueue.h>
be3a6b02 72#include <linux/if_vlan.h>
7d3d0439
RA
73#include <linux/ip.h>
74#include <linux/tcp.h>
75#include <net/tcp.h>
1da177e4 76
1da177e4
LT
77#include <asm/system.h>
78#include <asm/uaccess.h>
20346722 79#include <asm/io.h>
fe931395 80#include <asm/div64.h>
330ce0de 81#include <asm/irq.h>
1da177e4
LT
82
83/* local include */
84#include "s2io.h"
85#include "s2io-regs.h"
86
#define DRV_VERSION "2.0.22.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-rxd_mode descriptor geometry, indexed by the adapter's rxd_mode
 * (four buffer modes).  rxd_size[] is the descriptor size in bytes and
 * rxd_count[] the number of descriptors per memory block.
 */
static int rxd_size[4] = {32,48,48,64};
static int rxd_count[4] = {127,85,85,63};
1ee6dd77 96static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
97{
98 int ret;
99
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103 return ret;
104}
105
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
/* Evaluates to 1 only for Xframe I devices whose subsystem id falls in
 * the known-faulty 0x600B-0x600D / 0x640B-0x640D ranges, 0 otherwise. */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(dev_type == XFRAME_I_DEVICE) ?			\
		((((subid >= 0x600B) && (subid <= 0x600D)) || \
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is up when neither the local nor the remote RMAC fault bit is set. */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* NOTE(review): expands against a local variable 'sp' that must exist at
 * the use site — not hygienic, use with care. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Return codes of rx_buffer_level() below. */
#define PANIC	1
#define LOW	2
1ee6dd77 121static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
1da177e4 122{
1ee6dd77 123 struct mac_info *mac_control;
20346722
K
124
125 mac_control = &sp->mac_control;
863c11a9
AR
126 if (rxb_size <= rxd_count[sp->rxd_mode])
127 return PANIC;
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129 return LOW;
130 return 0;
1da177e4
LT
131}
132
/* Ethtool related variables and Macros. */
/* Names of the adapter self-tests reported through ethtool -t. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
141
/* Names of the hardware MAC/PCI statistics common to Xframe I and II,
 * in the order they are copied out of the stat block for ethtool -S. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
238
/* Extra hardware statistics available only on Xframe II adapters
 * (appended after the common keys when reporting ethtool -S). */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
257
258static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac
K
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
bd1034f0
AR
262 {"parity_err_cnt"},
263 {"serious_err_cnt"},
264 {"soft_reset_cnt"},
265 {"fifo_full_cnt"},
266 {"ring_full_cnt"},
267 ("alarm_transceiver_temp_high"),
268 ("alarm_transceiver_temp_low"),
269 ("alarm_laser_bias_current_high"),
270 ("alarm_laser_bias_current_low"),
271 ("alarm_laser_output_power_high"),
272 ("alarm_laser_output_power_low"),
273 ("warn_transceiver_temp_high"),
274 ("warn_transceiver_temp_low"),
275 ("warn_laser_bias_current_high"),
276 ("warn_laser_bias_current_low"),
277 ("warn_laser_output_power_high"),
278 ("warn_laser_output_power_low"),
7d3d0439
RA
279 ("lro_aggregated_pkts"),
280 ("lro_flush_both_count"),
281 ("lro_out_of_sequence_pkts"),
282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"),
c53d4945
SH
284 ("mem_alloc_fail_cnt"),
285 ("watchdog_timer_cnt")
1da177e4
LT
286};
287
/*
 * Counts of ethtool statistics/test strings.  Fix: every expansion is
 * now fully parenthesized so the macros behave as atomic values inside
 * larger expressions — the original `sizeof(x)/ ETH_GSTRING_LEN` and
 * `S2IO_TEST_LEN * ETH_GSTRING_LEN` would mis-associate when the macro
 * appeared next to another `/` or `*` operator at the use site.
 */
#define S2IO_XENA_STAT_LEN \
	(sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN \
	(sizeof(ethtool_enhanced_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN \
	(sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)

/* Xframe I reports the common MAC stats plus the driver's soft stats;
 * Xframe II additionally reports the enhanced hardware stats. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
301
/*
 * Initialize @timer to invoke @handle(@arg) and arm it to fire at
 * jiffies + @exp.  Fix: wrapped in do { } while (0) so the macro
 * expands as a single statement — the original bare statement list
 * would silently misbehave inside an un-braced `if`/`else` body.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
be3a6b02
K
308/* Add the vlan */
309static void s2io_vlan_rx_register(struct net_device *dev,
310 struct vlan_group *grp)
311{
1ee6dd77 312 struct s2io_nic *nic = dev->priv;
be3a6b02
K
313 unsigned long flags;
314
315 spin_lock_irqsave(&nic->tx_lock, flags);
316 nic->vlgrp = grp;
317 spin_unlock_irqrestore(&nic->tx_lock, flags);
318}
319
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
/* NOTE(review): presumably updated where RX_PA_CFG is programmed — that
 * code is outside this chunk; confirm before relying on its value. */
static int vlan_strip_flag;
926930b2 322
be3a6b02
K
323/* Unregister the vlan */
324static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
325{
1ee6dd77 326 struct s2io_nic *nic = dev->priv;
be3a6b02
K
327 unsigned long flags;
328
329 spin_lock_irqsave(&nic->tx_lock, flags);
5c15bdec 330 vlan_group_set_device(nic->vlgrp, vid, NULL);
be3a6b02
K
331 spin_unlock_irqrestore(&nic->tx_lock, flags);
332}
333
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* END_SIGN terminates the address/data command streams below. */
#define	END_SIGN	0x0
/* XAUI DTX configuration stream for Xframe II (Herc): pairs of
 * "set address" / "write data" commands, terminated by END_SIGN. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
360
/* XAUI DTX configuration stream for Xframe I (Xena): same
 * address/data command format as herc_act_dtx_cfg above. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
376
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
/* Command stream replayed into the adapter to work around the MAC
 * address corruption; END_SIGN-terminated like the DTX tables. */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
398
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

/* NAPI polling on by default; UFO off; VLAN tag stripping follows the
 * promiscuous state (see file header description of vlan_tag_strip). */
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO/ring sizing arrays, overridable on the module command line
 * via the module_param_array() hooks below. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 444
/*
 * S2io device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}			/* all-zero entry terminates the table */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI driver glue; probe/remove are defined later in this file. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
};
469
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of pages needed to hold @len entries at @per_each entries per
 * page, rounding up.  Fix: both parameters are now parenthesized —
 * the original left `per_each` bare, so an expression argument such
 * as `a + b` would bind incorrectly around the division.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
472
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return Value: SUCCESS on success; -EINVAL when the requested TxD
 * total exceeds the hardware limit; FAILURE when a ring's RxD count is
 * not block-aligned; -ENOMEM on any allocation failure (the caller is
 * expected to invoke free_shared_mem() to unwind partial allocations).
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIOFs */
	/* Total TxDs across all FIFOs must not exceed the HW maximum. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	/* TxD lists are packed into whole pages; lst_per_page lists fit
	 * in one PAGE_SIZE DMA-coherent allocation. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
				"%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
				"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			/* Carve the page into per-descriptor-list slots and
			 * record each slot's virtual/bus address. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* One u64 scratch slot per TxD for the UFO in-band signature.
	 * Note: 'size' still holds the total TxD count computed above. */
	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		/* pkt_cnt excludes the one link descriptor per block. */
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; //size is always page size
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			/* Per-descriptor lookup table into the block. */
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Each block's trailing link descriptor points at the next
		 * block (circularly, via the % blk_cnt wrap). */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			   (rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
				     GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					/* Over-allocate by ALIGN_SIZE, then
					 * round the usable pointer up.
					 * NOTE(review): the mask form assumes
					 * ALIGN_SIZE is (2^n - 1) — confirm
					 * against its definition in s2io.h. */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}
751
20346722
K
752/**
753 * free_shared_mem - Free the allocated Memory
1da177e4
LT
754 * @nic: Device private variable.
755 * Description: This function is to free all memory locations allocated by
756 * the init_shared_mem() function and return it to the kernel.
757 */
758
759static void free_shared_mem(struct s2io_nic *nic)
760{
761 int i, j, blk_cnt, size;
762 void *tmp_v_addr;
763 dma_addr_t tmp_p_addr;
1ee6dd77 764 struct mac_info *mac_control;
1da177e4
LT
765 struct config_param *config;
766 int lst_size, lst_per_page;
776bd20f 767 struct net_device *dev = nic->dev;
1da177e4
LT
768
769 if (!nic)
770 return;
771
772 mac_control = &nic->mac_control;
773 config = &nic->config;
774
1ee6dd77 775 lst_size = (sizeof(struct TxD) * config->max_txds);
1da177e4
LT
776 lst_per_page = PAGE_SIZE / lst_size;
777
778 for (i = 0; i < config->tx_fifo_num; i++) {
779 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
780 lst_per_page);
781 for (j = 0; j < page_num; j++) {
782 int mem_blks = (j * lst_per_page);
776bd20f 783 if (!mac_control->fifos[i].list_info)
6aa20a22 784 return;
776bd20f 785 if (!mac_control->fifos[i].list_info[mem_blks].
786 list_virt_addr)
1da177e4
LT
787 break;
788 pci_free_consistent(nic->pdev, PAGE_SIZE,
20346722
K
789 mac_control->fifos[i].
790 list_info[mem_blks].
1da177e4 791 list_virt_addr,
20346722
K
792 mac_control->fifos[i].
793 list_info[mem_blks].
1da177e4
LT
794 list_phy_addr);
795 }
776bd20f 796 /* If we got a zero DMA address during allocation,
797 * free the page now
798 */
799 if (mac_control->zerodma_virt_addr) {
800 pci_free_consistent(nic->pdev, PAGE_SIZE,
801 mac_control->zerodma_virt_addr,
802 (dma_addr_t)0);
6aa20a22 803 DBG_PRINT(INIT_DBG,
6b4d617d
AM
804 "%s: Freeing TxDL with zero DMA addr. ",
805 dev->name);
806 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
807 mac_control->zerodma_virt_addr);
776bd20f 808 }
20346722 809 kfree(mac_control->fifos[i].list_info);
1da177e4
LT
810 }
811
1da177e4 812 size = SIZE_OF_BLOCK;
1da177e4 813 for (i = 0; i < config->rx_ring_num; i++) {
20346722 814 blk_cnt = mac_control->rings[i].block_count;
1da177e4 815 for (j = 0; j < blk_cnt; j++) {
20346722
K
816 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
817 block_virt_addr;
818 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
819 block_dma_addr;
1da177e4
LT
820 if (tmp_v_addr == NULL)
821 break;
822 pci_free_consistent(nic->pdev, size,
823 tmp_v_addr, tmp_p_addr);
da6971d8 824 kfree(mac_control->rings[i].rx_blocks[j].rxds);
1da177e4
LT
825 }
826 }
827
da6971d8
AR
828 if (nic->rxd_mode >= RXD_MODE_3A) {
829 /* Freeing buffer storage addresses in 2BUFF mode. */
830 for (i = 0; i < config->rx_ring_num; i++) {
831 blk_cnt = config->rx_cfg[i].num_rxd /
832 (rxd_count[nic->rxd_mode] + 1);
833 for (j = 0; j < blk_cnt; j++) {
834 int k = 0;
835 if (!mac_control->rings[i].ba[j])
836 continue;
837 while (k != rxd_count[nic->rxd_mode]) {
1ee6dd77 838 struct buffAdd *ba =
da6971d8
AR
839 &mac_control->rings[i].ba[j][k];
840 kfree(ba->ba_0_org);
841 kfree(ba->ba_1_org);
842 k++;
843 }
844 kfree(mac_control->rings[i].ba[j]);
1da177e4 845 }
da6971d8 846 kfree(mac_control->rings[i].ba);
1da177e4 847 }
1da177e4 848 }
1da177e4
LT
849
850 if (mac_control->stats_mem) {
851 pci_free_consistent(nic->pdev,
852 mac_control->stats_mem_sz,
853 mac_control->stats_mem,
854 mac_control->stats_mem_phy);
855 }
fed5eccd
AR
856 if (nic->ufo_in_band_v)
857 kfree(nic->ufo_in_band_v);
1da177e4
LT
858}
859
541ae68f
K
860/**
861 * s2io_verify_pci_mode -
862 */
863
1ee6dd77 864static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 865{
1ee6dd77 866 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
867 register u64 val64 = 0;
868 int mode;
869
870 val64 = readq(&bar0->pci_mode);
871 mode = (u8)GET_PCI_MODE(val64);
872
873 if ( val64 & PCI_MODE_UNKNOWN_MODE)
874 return -1; /* Unknown PCI mode */
875 return mode;
876}
877
c92ca04b
AR
878#define NEC_VENID 0x1033
879#define NEC_DEVID 0x0125
880static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
881{
882 struct pci_dev *tdev = NULL;
26d36b64
AC
883 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
884 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
c92ca04b 885 if (tdev->bus == s2io_pdev->bus->parent)
26d36b64 886 pci_dev_put(tdev);
c92ca04b
AR
887 return 1;
888 }
889 }
890 return 0;
891}
541ae68f 892
/* Effective bus clock in MHz, indexed by the decoded PCI mode value.
 * NOTE(review): M2 entries list the doubled effective rate (e.g. mode
 * PCIX_M2_66 -> 133) — confirm against the Xframe register spec. */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

/**
 * s2io_print_pci_mode - decode and log the bus the adapter is on.
 * Records the bus speed in config->bus_speed and prints the bus
 * width/speed.  Returns the mode value, or -1 for an unknown mode or
 * unsupported speed.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if ( val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	/* Behind an NEC bridge the device is effectively on PCI-E; the
	 * width/speed breakdown below does not apply. */
	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
							nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch(mode) {
		case PCI_MODE_PCI_33:
			DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
			break;
		case PCI_MODE_PCI_66:
			DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
			break;
		case PCI_MODE_PCIX_M1_66:
			DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M1_100:
			DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M1_133:
			DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
			break;
		case PCI_MODE_PCIX_M2_66:
			DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
			break;
		case PCI_MODE_PCIX_M2_100:
			DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
			break;
		case PCI_MODE_PCIX_M2_133:
			DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
			break;
		default:
			return -1;	/* Unsupported bus speed */
	}

	return mode;
}
955
20346722
K
956/**
957 * init_nic - Initialization of hardware
1da177e4 958 * @nic: device peivate variable
20346722
K
959 * Description: The function sequentially configures every block
960 * of the H/W from their reset values.
961 * Return Value: SUCCESS on success and
1da177e4
LT
962 * '-1' on failure (endian settings incorrect).
963 */
964
965static int init_nic(struct s2io_nic *nic)
966{
1ee6dd77 967 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
968 struct net_device *dev = nic->dev;
969 register u64 val64 = 0;
970 void __iomem *add;
971 u32 time;
972 int i, j;
1ee6dd77 973 struct mac_info *mac_control;
1da177e4 974 struct config_param *config;
c92ca04b 975 int dtx_cnt = 0;
1da177e4 976 unsigned long long mem_share;
20346722 977 int mem_size;
1da177e4
LT
978
979 mac_control = &nic->mac_control;
980 config = &nic->config;
981
5e25b9dd 982 /* to set the swapper controle on the card */
20346722 983 if(s2io_set_swapper(nic)) {
1da177e4
LT
984 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
985 return -1;
986 }
987
541ae68f
K
988 /*
989 * Herc requires EOI to be removed from reset before XGXS, so..
990 */
991 if (nic->device_type & XFRAME_II_DEVICE) {
992 val64 = 0xA500000000ULL;
993 writeq(val64, &bar0->sw_reset);
994 msleep(500);
995 val64 = readq(&bar0->sw_reset);
996 }
997
1da177e4
LT
998 /* Remove XGXS from reset state */
999 val64 = 0;
1000 writeq(val64, &bar0->sw_reset);
1da177e4 1001 msleep(500);
20346722 1002 val64 = readq(&bar0->sw_reset);
1da177e4
LT
1003
1004 /* Enable Receiving broadcasts */
1005 add = &bar0->mac_cfg;
1006 val64 = readq(&bar0->mac_cfg);
1007 val64 |= MAC_RMAC_BCAST_ENABLE;
1008 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1009 writel((u32) val64, add);
1010 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1011 writel((u32) (val64 >> 32), (add + 4));
1012
1013 /* Read registers in all blocks */
1014 val64 = readq(&bar0->mac_int_mask);
1015 val64 = readq(&bar0->mc_int_mask);
1016 val64 = readq(&bar0->xgxs_int_mask);
1017
1018 /* Set MTU */
1019 val64 = dev->mtu;
1020 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1021
541ae68f
K
1022 if (nic->device_type & XFRAME_II_DEVICE) {
1023 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1024 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1025 &bar0->dtx_control, UF);
541ae68f
K
1026 if (dtx_cnt & 0x1)
1027 msleep(1); /* Necessary!! */
1da177e4
LT
1028 dtx_cnt++;
1029 }
541ae68f 1030 } else {
c92ca04b
AR
1031 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1032 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1033 &bar0->dtx_control, UF);
1034 val64 = readq(&bar0->dtx_control);
1035 dtx_cnt++;
1da177e4
LT
1036 }
1037 }
1038
1039 /* Tx DMA Initialization */
1040 val64 = 0;
1041 writeq(val64, &bar0->tx_fifo_partition_0);
1042 writeq(val64, &bar0->tx_fifo_partition_1);
1043 writeq(val64, &bar0->tx_fifo_partition_2);
1044 writeq(val64, &bar0->tx_fifo_partition_3);
1045
1046
1047 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1048 val64 |=
1049 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1050 13) | vBIT(config->tx_cfg[i].fifo_priority,
1051 ((i * 32) + 5), 3);
1052
1053 if (i == (config->tx_fifo_num - 1)) {
1054 if (i % 2 == 0)
1055 i++;
1056 }
1057
1058 switch (i) {
1059 case 1:
1060 writeq(val64, &bar0->tx_fifo_partition_0);
1061 val64 = 0;
1062 break;
1063 case 3:
1064 writeq(val64, &bar0->tx_fifo_partition_1);
1065 val64 = 0;
1066 break;
1067 case 5:
1068 writeq(val64, &bar0->tx_fifo_partition_2);
1069 val64 = 0;
1070 break;
1071 case 7:
1072 writeq(val64, &bar0->tx_fifo_partition_3);
1073 break;
1074 }
1075 }
1076
5e25b9dd
K
1077 /*
1078 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1079 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1080 */
541ae68f
K
1081 if ((nic->device_type == XFRAME_I_DEVICE) &&
1082 (get_xena_rev_id(nic->pdev) < 4))
5e25b9dd
K
1083 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1084
1da177e4
LT
1085 val64 = readq(&bar0->tx_fifo_partition_0);
1086 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1087 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1088
20346722
K
1089 /*
1090 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1091 * integrity checking.
1092 */
1093 val64 = readq(&bar0->tx_pa_cfg);
1094 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1095 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1096 writeq(val64, &bar0->tx_pa_cfg);
1097
1098 /* Rx DMA intialization. */
1099 val64 = 0;
1100 for (i = 0; i < config->rx_ring_num; i++) {
1101 val64 |=
1102 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1103 3);
1104 }
1105 writeq(val64, &bar0->rx_queue_priority);
1106
20346722
K
1107 /*
1108 * Allocating equal share of memory to all the
1da177e4
LT
1109 * configured Rings.
1110 */
1111 val64 = 0;
541ae68f
K
1112 if (nic->device_type & XFRAME_II_DEVICE)
1113 mem_size = 32;
1114 else
1115 mem_size = 64;
1116
1da177e4
LT
1117 for (i = 0; i < config->rx_ring_num; i++) {
1118 switch (i) {
1119 case 0:
20346722
K
1120 mem_share = (mem_size / config->rx_ring_num +
1121 mem_size % config->rx_ring_num);
1da177e4
LT
1122 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1123 continue;
1124 case 1:
20346722 1125 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1126 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1127 continue;
1128 case 2:
20346722 1129 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1130 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1131 continue;
1132 case 3:
20346722 1133 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1134 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1135 continue;
1136 case 4:
20346722 1137 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1138 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1139 continue;
1140 case 5:
20346722 1141 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1142 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1143 continue;
1144 case 6:
20346722 1145 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1146 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1147 continue;
1148 case 7:
20346722 1149 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1150 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1151 continue;
1152 }
1153 }
1154 writeq(val64, &bar0->rx_queue_cfg);
1155
20346722 1156 /*
5e25b9dd
K
1157 * Filling Tx round robin registers
1158 * as per the number of FIFOs
1da177e4 1159 */
5e25b9dd
K
1160 switch (config->tx_fifo_num) {
1161 case 1:
1162 val64 = 0x0000000000000000ULL;
1163 writeq(val64, &bar0->tx_w_round_robin_0);
1164 writeq(val64, &bar0->tx_w_round_robin_1);
1165 writeq(val64, &bar0->tx_w_round_robin_2);
1166 writeq(val64, &bar0->tx_w_round_robin_3);
1167 writeq(val64, &bar0->tx_w_round_robin_4);
1168 break;
1169 case 2:
1170 val64 = 0x0000010000010000ULL;
1171 writeq(val64, &bar0->tx_w_round_robin_0);
1172 val64 = 0x0100000100000100ULL;
1173 writeq(val64, &bar0->tx_w_round_robin_1);
1174 val64 = 0x0001000001000001ULL;
1175 writeq(val64, &bar0->tx_w_round_robin_2);
1176 val64 = 0x0000010000010000ULL;
1177 writeq(val64, &bar0->tx_w_round_robin_3);
1178 val64 = 0x0100000000000000ULL;
1179 writeq(val64, &bar0->tx_w_round_robin_4);
1180 break;
1181 case 3:
1182 val64 = 0x0001000102000001ULL;
1183 writeq(val64, &bar0->tx_w_round_robin_0);
1184 val64 = 0x0001020000010001ULL;
1185 writeq(val64, &bar0->tx_w_round_robin_1);
1186 val64 = 0x0200000100010200ULL;
1187 writeq(val64, &bar0->tx_w_round_robin_2);
1188 val64 = 0x0001000102000001ULL;
1189 writeq(val64, &bar0->tx_w_round_robin_3);
1190 val64 = 0x0001020000000000ULL;
1191 writeq(val64, &bar0->tx_w_round_robin_4);
1192 break;
1193 case 4:
1194 val64 = 0x0001020300010200ULL;
1195 writeq(val64, &bar0->tx_w_round_robin_0);
1196 val64 = 0x0100000102030001ULL;
1197 writeq(val64, &bar0->tx_w_round_robin_1);
1198 val64 = 0x0200010000010203ULL;
1199 writeq(val64, &bar0->tx_w_round_robin_2);
1200 val64 = 0x0001020001000001ULL;
1201 writeq(val64, &bar0->tx_w_round_robin_3);
1202 val64 = 0x0203000100000000ULL;
1203 writeq(val64, &bar0->tx_w_round_robin_4);
1204 break;
1205 case 5:
1206 val64 = 0x0001000203000102ULL;
1207 writeq(val64, &bar0->tx_w_round_robin_0);
1208 val64 = 0x0001020001030004ULL;
1209 writeq(val64, &bar0->tx_w_round_robin_1);
1210 val64 = 0x0001000203000102ULL;
1211 writeq(val64, &bar0->tx_w_round_robin_2);
1212 val64 = 0x0001020001030004ULL;
1213 writeq(val64, &bar0->tx_w_round_robin_3);
1214 val64 = 0x0001000000000000ULL;
1215 writeq(val64, &bar0->tx_w_round_robin_4);
1216 break;
1217 case 6:
1218 val64 = 0x0001020304000102ULL;
1219 writeq(val64, &bar0->tx_w_round_robin_0);
1220 val64 = 0x0304050001020001ULL;
1221 writeq(val64, &bar0->tx_w_round_robin_1);
1222 val64 = 0x0203000100000102ULL;
1223 writeq(val64, &bar0->tx_w_round_robin_2);
1224 val64 = 0x0304000102030405ULL;
1225 writeq(val64, &bar0->tx_w_round_robin_3);
1226 val64 = 0x0001000200000000ULL;
1227 writeq(val64, &bar0->tx_w_round_robin_4);
1228 break;
1229 case 7:
1230 val64 = 0x0001020001020300ULL;
1231 writeq(val64, &bar0->tx_w_round_robin_0);
1232 val64 = 0x0102030400010203ULL;
1233 writeq(val64, &bar0->tx_w_round_robin_1);
1234 val64 = 0x0405060001020001ULL;
1235 writeq(val64, &bar0->tx_w_round_robin_2);
1236 val64 = 0x0304050000010200ULL;
1237 writeq(val64, &bar0->tx_w_round_robin_3);
1238 val64 = 0x0102030000000000ULL;
1239 writeq(val64, &bar0->tx_w_round_robin_4);
1240 break;
1241 case 8:
1242 val64 = 0x0001020300040105ULL;
1243 writeq(val64, &bar0->tx_w_round_robin_0);
1244 val64 = 0x0200030106000204ULL;
1245 writeq(val64, &bar0->tx_w_round_robin_1);
1246 val64 = 0x0103000502010007ULL;
1247 writeq(val64, &bar0->tx_w_round_robin_2);
1248 val64 = 0x0304010002060500ULL;
1249 writeq(val64, &bar0->tx_w_round_robin_3);
1250 val64 = 0x0103020400000000ULL;
1251 writeq(val64, &bar0->tx_w_round_robin_4);
1252 break;
1253 }
1254
b41477f3 1255 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1256 val64 = readq(&bar0->tx_fifo_partition_0);
1257 val64 |= (TX_FIFO_PARTITION_EN);
1258 writeq(val64, &bar0->tx_fifo_partition_0);
1259
5e25b9dd
K
1260 /* Filling the Rx round robin registers as per the
1261 * number of Rings and steering based on QoS.
1262 */
1263 switch (config->rx_ring_num) {
1264 case 1:
1265 val64 = 0x8080808080808080ULL;
1266 writeq(val64, &bar0->rts_qos_steering);
1267 break;
1268 case 2:
1269 val64 = 0x0000010000010000ULL;
1270 writeq(val64, &bar0->rx_w_round_robin_0);
1271 val64 = 0x0100000100000100ULL;
1272 writeq(val64, &bar0->rx_w_round_robin_1);
1273 val64 = 0x0001000001000001ULL;
1274 writeq(val64, &bar0->rx_w_round_robin_2);
1275 val64 = 0x0000010000010000ULL;
1276 writeq(val64, &bar0->rx_w_round_robin_3);
1277 val64 = 0x0100000000000000ULL;
1278 writeq(val64, &bar0->rx_w_round_robin_4);
1279
1280 val64 = 0x8080808040404040ULL;
1281 writeq(val64, &bar0->rts_qos_steering);
1282 break;
1283 case 3:
1284 val64 = 0x0001000102000001ULL;
1285 writeq(val64, &bar0->rx_w_round_robin_0);
1286 val64 = 0x0001020000010001ULL;
1287 writeq(val64, &bar0->rx_w_round_robin_1);
1288 val64 = 0x0200000100010200ULL;
1289 writeq(val64, &bar0->rx_w_round_robin_2);
1290 val64 = 0x0001000102000001ULL;
1291 writeq(val64, &bar0->rx_w_round_robin_3);
1292 val64 = 0x0001020000000000ULL;
1293 writeq(val64, &bar0->rx_w_round_robin_4);
1294
1295 val64 = 0x8080804040402020ULL;
1296 writeq(val64, &bar0->rts_qos_steering);
1297 break;
1298 case 4:
1299 val64 = 0x0001020300010200ULL;
1300 writeq(val64, &bar0->rx_w_round_robin_0);
1301 val64 = 0x0100000102030001ULL;
1302 writeq(val64, &bar0->rx_w_round_robin_1);
1303 val64 = 0x0200010000010203ULL;
1304 writeq(val64, &bar0->rx_w_round_robin_2);
6aa20a22 1305 val64 = 0x0001020001000001ULL;
5e25b9dd
K
1306 writeq(val64, &bar0->rx_w_round_robin_3);
1307 val64 = 0x0203000100000000ULL;
1308 writeq(val64, &bar0->rx_w_round_robin_4);
1309
1310 val64 = 0x8080404020201010ULL;
1311 writeq(val64, &bar0->rts_qos_steering);
1312 break;
1313 case 5:
1314 val64 = 0x0001000203000102ULL;
1315 writeq(val64, &bar0->rx_w_round_robin_0);
1316 val64 = 0x0001020001030004ULL;
1317 writeq(val64, &bar0->rx_w_round_robin_1);
1318 val64 = 0x0001000203000102ULL;
1319 writeq(val64, &bar0->rx_w_round_robin_2);
1320 val64 = 0x0001020001030004ULL;
1321 writeq(val64, &bar0->rx_w_round_robin_3);
1322 val64 = 0x0001000000000000ULL;
1323 writeq(val64, &bar0->rx_w_round_robin_4);
1324
1325 val64 = 0x8080404020201008ULL;
1326 writeq(val64, &bar0->rts_qos_steering);
1327 break;
1328 case 6:
1329 val64 = 0x0001020304000102ULL;
1330 writeq(val64, &bar0->rx_w_round_robin_0);
1331 val64 = 0x0304050001020001ULL;
1332 writeq(val64, &bar0->rx_w_round_robin_1);
1333 val64 = 0x0203000100000102ULL;
1334 writeq(val64, &bar0->rx_w_round_robin_2);
1335 val64 = 0x0304000102030405ULL;
1336 writeq(val64, &bar0->rx_w_round_robin_3);
1337 val64 = 0x0001000200000000ULL;
1338 writeq(val64, &bar0->rx_w_round_robin_4);
1339
1340 val64 = 0x8080404020100804ULL;
1341 writeq(val64, &bar0->rts_qos_steering);
1342 break;
1343 case 7:
1344 val64 = 0x0001020001020300ULL;
1345 writeq(val64, &bar0->rx_w_round_robin_0);
1346 val64 = 0x0102030400010203ULL;
1347 writeq(val64, &bar0->rx_w_round_robin_1);
1348 val64 = 0x0405060001020001ULL;
1349 writeq(val64, &bar0->rx_w_round_robin_2);
1350 val64 = 0x0304050000010200ULL;
1351 writeq(val64, &bar0->rx_w_round_robin_3);
1352 val64 = 0x0102030000000000ULL;
1353 writeq(val64, &bar0->rx_w_round_robin_4);
1354
1355 val64 = 0x8080402010080402ULL;
1356 writeq(val64, &bar0->rts_qos_steering);
1357 break;
1358 case 8:
1359 val64 = 0x0001020300040105ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_0);
1361 val64 = 0x0200030106000204ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_1);
1363 val64 = 0x0103000502010007ULL;
1364 writeq(val64, &bar0->rx_w_round_robin_2);
1365 val64 = 0x0304010002060500ULL;
1366 writeq(val64, &bar0->rx_w_round_robin_3);
1367 val64 = 0x0103020400000000ULL;
1368 writeq(val64, &bar0->rx_w_round_robin_4);
1369
1370 val64 = 0x8040201008040201ULL;
1371 writeq(val64, &bar0->rts_qos_steering);
1372 break;
1373 }
1da177e4
LT
1374
1375 /* UDP Fix */
1376 val64 = 0;
20346722 1377 for (i = 0; i < 8; i++)
1da177e4
LT
1378 writeq(val64, &bar0->rts_frm_len_n[i]);
1379
5e25b9dd
K
1380 /* Set the default rts frame length for the rings configured */
1381 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1382 for (i = 0 ; i < config->rx_ring_num ; i++)
1383 writeq(val64, &bar0->rts_frm_len_n[i]);
1384
1385 /* Set the frame length for the configured rings
1386 * desired by the user
1387 */
1388 for (i = 0; i < config->rx_ring_num; i++) {
1389 /* If rts_frm_len[i] == 0 then it is assumed that user not
1390 * specified frame length steering.
1391 * If the user provides the frame length then program
1392 * the rts_frm_len register for those values or else
1393 * leave it as it is.
1394 */
1395 if (rts_frm_len[i] != 0) {
1396 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1397 &bar0->rts_frm_len_n[i]);
1398 }
1399 }
926930b2 1400
9fc93a41
SS
1401 /* Disable differentiated services steering logic */
1402 for (i = 0; i < 64; i++) {
1403 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1404 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1405 dev->name);
1406 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1407 return FAILURE;
1408 }
1409 }
1410
20346722 1411 /* Program statistics memory */
1da177e4 1412 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1413
541ae68f
K
1414 if (nic->device_type == XFRAME_II_DEVICE) {
1415 val64 = STAT_BC(0x320);
1416 writeq(val64, &bar0->stat_byte_cnt);
1417 }
1418
20346722 1419 /*
1da177e4
LT
1420 * Initializing the sampling rate for the device to calculate the
1421 * bandwidth utilization.
1422 */
1423 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1424 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1425 writeq(val64, &bar0->mac_link_util);
1426
1427
20346722
K
1428 /*
1429 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1430 * Scheme.
1431 */
20346722
K
1432 /*
1433 * TTI Initialization. Default Tx timer gets us about
1da177e4
LT
1434 * 250 interrupts per sec. Continuous interrupts are enabled
1435 * by default.
1436 */
541ae68f
K
1437 if (nic->device_type == XFRAME_II_DEVICE) {
1438 int count = (nic->config.bus_speed * 125)/2;
1439 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1440 } else {
1441
1442 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1443 }
1444 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1da177e4 1445 TTI_DATA1_MEM_TX_URNG_B(0x10) |
5e25b9dd 1446 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
541ae68f
K
1447 if (use_continuous_tx_intrs)
1448 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1da177e4
LT
1449 writeq(val64, &bar0->tti_data1_mem);
1450
1451 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1452 TTI_DATA2_MEM_TX_UFC_B(0x20) |
19a60522 1453 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1da177e4
LT
1454 writeq(val64, &bar0->tti_data2_mem);
1455
1456 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1457 writeq(val64, &bar0->tti_command_mem);
1458
20346722 1459 /*
1da177e4
LT
1460 * Once the operation completes, the Strobe bit of the command
1461 * register will be reset. We poll for this particular condition
1462 * We wait for a maximum of 500ms for the operation to complete,
1463 * if it's not complete by then we return error.
1464 */
1465 time = 0;
1466 while (TRUE) {
1467 val64 = readq(&bar0->tti_command_mem);
1468 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1469 break;
1470 }
1471 if (time > 10) {
1472 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1473 dev->name);
1474 return -1;
1475 }
1476 msleep(50);
1477 time++;
1478 }
1479
b6e3f982
K
1480 if (nic->config.bimodal) {
1481 int k = 0;
1482 for (k = 0; k < config->rx_ring_num; k++) {
1483 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1484 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1485 writeq(val64, &bar0->tti_command_mem);
541ae68f 1486
541ae68f 1487 /*
b6e3f982
K
1488 * Once the operation completes, the Strobe bit of the command
1489 * register will be reset. We poll for this particular condition
1490 * We wait for a maximum of 500ms for the operation to complete,
1491 * if it's not complete by then we return error.
1492 */
1493 time = 0;
1494 while (TRUE) {
1495 val64 = readq(&bar0->tti_command_mem);
1496 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1497 break;
1498 }
1499 if (time > 10) {
1500 DBG_PRINT(ERR_DBG,
1501 "%s: TTI init Failed\n",
1502 dev->name);
1503 return -1;
1504 }
1505 time++;
1506 msleep(50);
1507 }
1508 }
541ae68f 1509 } else {
1da177e4 1510
b6e3f982
K
1511 /* RTI Initialization */
1512 if (nic->device_type == XFRAME_II_DEVICE) {
1513 /*
1514 * Programmed to generate Apprx 500 Intrs per
1515 * second
1516 */
1517 int count = (nic->config.bus_speed * 125)/4;
1518 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1519 } else {
1520 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1521 }
1522 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1523 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1524 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1da177e4 1525
b6e3f982 1526 writeq(val64, &bar0->rti_data1_mem);
1da177e4 1527
b6e3f982 1528 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
cc6e7c44
RA
1529 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1530 if (nic->intr_type == MSI_X)
1531 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1532 RTI_DATA2_MEM_RX_UFC_D(0x40));
1533 else
1534 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1535 RTI_DATA2_MEM_RX_UFC_D(0x80));
b6e3f982 1536 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1537
b6e3f982
K
1538 for (i = 0; i < config->rx_ring_num; i++) {
1539 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1540 | RTI_CMD_MEM_OFFSET(i);
1541 writeq(val64, &bar0->rti_command_mem);
1542
1543 /*
1544 * Once the operation completes, the Strobe bit of the
1545 * command register will be reset. We poll for this
1546 * particular condition. We wait for a maximum of 500ms
1547 * for the operation to complete, if it's not complete
1548 * by then we return error.
1549 */
1550 time = 0;
1551 while (TRUE) {
1552 val64 = readq(&bar0->rti_command_mem);
1553 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1554 break;
1555 }
1556 if (time > 10) {
1557 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1558 dev->name);
1559 return -1;
1560 }
1561 time++;
1562 msleep(50);
1563 }
1da177e4 1564 }
1da177e4
LT
1565 }
1566
20346722
K
1567 /*
1568 * Initializing proper values as Pause threshold into all
1da177e4
LT
1569 * the 8 Queues on Rx side.
1570 */
1571 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1572 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1573
1574 /* Disable RMAC PAD STRIPPING */
509a2671 1575 add = &bar0->mac_cfg;
1da177e4
LT
1576 val64 = readq(&bar0->mac_cfg);
1577 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1578 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1579 writel((u32) (val64), add);
1580 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1581 writel((u32) (val64 >> 32), (add + 4));
1582 val64 = readq(&bar0->mac_cfg);
1583
7d3d0439
RA
1584 /* Enable FCS stripping by adapter */
1585 add = &bar0->mac_cfg;
1586 val64 = readq(&bar0->mac_cfg);
1587 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1588 if (nic->device_type == XFRAME_II_DEVICE)
1589 writeq(val64, &bar0->mac_cfg);
1590 else {
1591 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1592 writel((u32) (val64), add);
1593 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1594 writel((u32) (val64 >> 32), (add + 4));
1595 }
1596
20346722
K
1597 /*
1598 * Set the time value to be inserted in the pause frame
1da177e4
LT
1599 * generated by xena.
1600 */
1601 val64 = readq(&bar0->rmac_pause_cfg);
1602 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1603 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1604 writeq(val64, &bar0->rmac_pause_cfg);
1605
20346722 1606 /*
1da177e4
LT
1607 * Set the Threshold Limit for Generating the pause frame
1608 * If the amount of data in any Queue exceeds ratio of
1609 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1610 * pause frame is generated
1611 */
1612 val64 = 0;
1613 for (i = 0; i < 4; i++) {
1614 val64 |=
1615 (((u64) 0xFF00 | nic->mac_control.
1616 mc_pause_threshold_q0q3)
1617 << (i * 2 * 8));
1618 }
1619 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1620
1621 val64 = 0;
1622 for (i = 0; i < 4; i++) {
1623 val64 |=
1624 (((u64) 0xFF00 | nic->mac_control.
1625 mc_pause_threshold_q4q7)
1626 << (i * 2 * 8));
1627 }
1628 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1629
20346722
K
1630 /*
1631 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1632 * exceeded the limit pointed by shared_splits
1633 */
1634 val64 = readq(&bar0->pic_control);
1635 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1636 writeq(val64, &bar0->pic_control);
1637
863c11a9
AR
1638 if (nic->config.bus_speed == 266) {
1639 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1640 writeq(0x0, &bar0->read_retry_delay);
1641 writeq(0x0, &bar0->write_retry_delay);
1642 }
1643
541ae68f
K
1644 /*
1645 * Programming the Herc to split every write transaction
1646 * that does not start on an ADB to reduce disconnects.
1647 */
1648 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1649 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1650 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1651 writeq(val64, &bar0->misc_control);
1652 val64 = readq(&bar0->pic_control2);
1653 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1654 writeq(val64, &bar0->pic_control2);
541ae68f 1655 }
c92ca04b
AR
1656 if (strstr(nic->product_name, "CX4")) {
1657 val64 = TMAC_AVG_IPG(0x17);
1658 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d
K
1659 }
1660
1da177e4
LT
1661 return SUCCESS;
1662}
a371a07d
K
1663#define LINK_UP_DOWN_INTERRUPT 1
1664#define MAC_RMAC_ERR_TIMER 2
1665
1ee6dd77 1666static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d 1667{
cc6e7c44
RA
1668 if (nic->intr_type != INTA)
1669 return MAC_RMAC_ERR_TIMER;
a371a07d
K
1670 if (nic->device_type == XFRAME_II_DEVICE)
1671 return LINK_UP_DOWN_INTERRUPT;
1672 else
1673 return MAC_RMAC_ERR_TIMER;
1674}
1da177e4 1675
20346722
K
1676/**
1677 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1da177e4
LT
1678 * @nic: device private variable,
1679 * @mask: A mask indicating which Intr block must be modified and,
1680 * @flag: A flag indicating whether to enable or disable the Intrs.
1681 * Description: This function will either disable or enable the interrupts
20346722
K
1682 * depending on the flag argument. The mask argument can be used to
1683 * enable/disable any Intr block.
1da177e4
LT
1684 * Return Value: NONE.
1685 */
1686
1687static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1688{
1ee6dd77 1689 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1690 register u64 val64 = 0, temp64 = 0;
1691
1692 /* Top level interrupt classification */
1693 /* PIC Interrupts */
1694 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1695 /* Enable PIC Intrs in the general intr mask register */
a113ae06 1696 val64 = TXPIC_INT_M;
1da177e4
LT
1697 if (flag == ENABLE_INTRS) {
1698 temp64 = readq(&bar0->general_int_mask);
1699 temp64 &= ~((u64) val64);
1700 writeq(temp64, &bar0->general_int_mask);
20346722 1701 /*
a371a07d 1702 * If Hercules adapter enable GPIO otherwise
b41477f3 1703 * disable all PCIX, Flash, MDIO, IIC and GPIO
20346722
K
1704 * interrupts for now.
1705 * TODO
1da177e4 1706 */
a371a07d
K
1707 if (s2io_link_fault_indication(nic) ==
1708 LINK_UP_DOWN_INTERRUPT ) {
1709 temp64 = readq(&bar0->pic_int_mask);
1710 temp64 &= ~((u64) PIC_INT_GPIO);
1711 writeq(temp64, &bar0->pic_int_mask);
1712 temp64 = readq(&bar0->gpio_int_mask);
1713 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1714 writeq(temp64, &bar0->gpio_int_mask);
1715 } else {
1716 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1717 }
20346722 1718 /*
1da177e4
LT
1719 * No MSI Support is available presently, so TTI and
1720 * RTI interrupts are also disabled.
1721 */
1722 } else if (flag == DISABLE_INTRS) {
20346722
K
1723 /*
1724 * Disable PIC Intrs in the general
1725 * intr mask register
1da177e4
LT
1726 */
1727 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1728 temp64 = readq(&bar0->general_int_mask);
1729 val64 |= temp64;
1730 writeq(val64, &bar0->general_int_mask);
1731 }
1732 }
1733
1da177e4
LT
1734 /* MAC Interrupts */
1735 /* Enabling/Disabling MAC interrupts */
1736 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1737 val64 = TXMAC_INT_M | RXMAC_INT_M;
1738 if (flag == ENABLE_INTRS) {
1739 temp64 = readq(&bar0->general_int_mask);
1740 temp64 &= ~((u64) val64);
1741 writeq(temp64, &bar0->general_int_mask);
20346722
K
1742 /*
1743 * All MAC block error interrupts are disabled for now
1da177e4
LT
1744 * TODO
1745 */
1da177e4 1746 } else if (flag == DISABLE_INTRS) {
20346722
K
1747 /*
1748 * Disable MAC Intrs in the general intr mask register
1da177e4
LT
1749 */
1750 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1751 writeq(DISABLE_ALL_INTRS,
1752 &bar0->mac_rmac_err_mask);
1753
1754 temp64 = readq(&bar0->general_int_mask);
1755 val64 |= temp64;
1756 writeq(val64, &bar0->general_int_mask);
1757 }
1758 }
1759
1da177e4
LT
1760 /* Tx traffic interrupts */
1761 if (mask & TX_TRAFFIC_INTR) {
1762 val64 = TXTRAFFIC_INT_M;
1763 if (flag == ENABLE_INTRS) {
1764 temp64 = readq(&bar0->general_int_mask);
1765 temp64 &= ~((u64) val64);
1766 writeq(temp64, &bar0->general_int_mask);
20346722 1767 /*
1da177e4 1768 * Enable all the Tx side interrupts
20346722 1769 * writing 0 Enables all 64 TX interrupt levels
1da177e4
LT
1770 */
1771 writeq(0x0, &bar0->tx_traffic_mask);
1772 } else if (flag == DISABLE_INTRS) {
20346722
K
1773 /*
1774 * Disable Tx Traffic Intrs in the general intr mask
1da177e4
LT
1775 * register.
1776 */
1777 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1778 temp64 = readq(&bar0->general_int_mask);
1779 val64 |= temp64;
1780 writeq(val64, &bar0->general_int_mask);
1781 }
1782 }
1783
1784 /* Rx traffic interrupts */
1785 if (mask & RX_TRAFFIC_INTR) {
1786 val64 = RXTRAFFIC_INT_M;
1787 if (flag == ENABLE_INTRS) {
1788 temp64 = readq(&bar0->general_int_mask);
1789 temp64 &= ~((u64) val64);
1790 writeq(temp64, &bar0->general_int_mask);
1791 /* writing 0 Enables all 8 RX interrupt levels */
1792 writeq(0x0, &bar0->rx_traffic_mask);
1793 } else if (flag == DISABLE_INTRS) {
20346722
K
1794 /*
1795 * Disable Rx Traffic Intrs in the general intr mask
1da177e4
LT
1796 * register.
1797 */
1798 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1799 temp64 = readq(&bar0->general_int_mask);
1800 val64 |= temp64;
1801 writeq(val64, &bar0->general_int_mask);
1802 }
1803 }
1804}
1805
19a60522
SS
1806/**
1807 * verify_pcc_quiescent- Checks for PCC quiescent state
1808 * Return: 1 If PCC is quiescence
1809 * 0 If PCC is not quiescence
1810 */
1ee6dd77 1811static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 1812{
19a60522 1813 int ret = 0, herc;
1ee6dd77 1814 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522
SS
1815 u64 val64 = readq(&bar0->adapter_status);
1816
1817 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722
K
1818
1819 if (flag == FALSE) {
19a60522
SS
1820 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1821 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 1822 ret = 1;
19a60522
SS
1823 } else {
1824 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 1825 ret = 1;
20346722
K
1826 }
1827 } else {
19a60522 1828 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
5e25b9dd 1829 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 1830 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 1831 ret = 1;
5e25b9dd
K
1832 } else {
1833 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 1834 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 1835 ret = 1;
20346722
K
1836 }
1837 }
1838
1839 return ret;
1840}
1841/**
1842 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4 1843 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 1844 * on whether adapter enable bit was written or not the comparison
1da177e4
LT
1845 * differs and the calling function passes the input argument flag to
1846 * indicate this.
20346722 1847 * Return: 1 If xena is quiescence
1da177e4
LT
1848 * 0 If Xena is not quiescence
1849 */
1850
1ee6dd77 1851static int verify_xena_quiescence(struct s2io_nic *sp)
1da177e4 1852{
19a60522 1853 int mode;
1ee6dd77 1854 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522
SS
1855 u64 val64 = readq(&bar0->adapter_status);
1856 mode = s2io_verify_pci_mode(sp);
1da177e4 1857
19a60522
SS
1858 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1859 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1860 return 0;
1861 }
1862 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1863 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1864 return 0;
1865 }
1866 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1867 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1868 return 0;
1869 }
1870 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1871 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1872 return 0;
1873 }
1874 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1875 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1876 return 0;
1877 }
1878 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1879 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1880 return 0;
1881 }
1882 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1883 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1884 return 0;
1885 }
1886 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1887 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1888 return 0;
1da177e4
LT
1889 }
1890
19a60522
SS
1891 /*
1892 * In PCI 33 mode, the P_PLL is not used, and therefore,
1893 * the the P_PLL_LOCK bit in the adapter_status register will
1894 * not be asserted.
1895 */
1896 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1897 sp->device_type == XFRAME_II_DEVICE && mode !=
1898 PCI_MODE_PCI_33) {
1899 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1900 return 0;
1901 }
1902 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1903 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1904 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1905 return 0;
1906 }
1907 return 1;
1da177e4
LT
1908}
1909
1910/**
1911 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1912 * @sp: Pointer to device specifc structure
20346722 1913 * Description :
1da177e4
LT
1914 * New procedure to clear mac address reading problems on Alpha platforms
1915 *
1916 */
1917
1ee6dd77 1918static void fix_mac_address(struct s2io_nic * sp)
1da177e4 1919{
1ee6dd77 1920 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
1921 u64 val64;
1922 int i = 0;
1923
1924 while (fix_mac[i] != END_SIGN) {
1925 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 1926 udelay(10);
1da177e4
LT
1927 val64 = readq(&bar0->gpio_control);
1928 }
1929}
1930
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and FAILURE (-1) if the adapter is not quiescent.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration: point each receive
	 * ring's PRC at its first descriptor block and program the
	 * ring mode and RxD backoff interval. */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Module parameter vlan_tag_strip == 0 disables hardware VLAN
	 * tag stripping and records the choice in vlan_strip_flag. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection.
	 * NOTE(review): the bit is cleared here, so presumably
	 * ADAPTER_ECC_EN acts as an ECC-disable bit when set — confirm
	 * against the Xframe register specification. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 * (Writing the value back clears the asserted bits.)
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED for Xframe-I cards
	 * with subsystem revision >= 0x07. */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Magic LED init sequence written at BAR0 offset 0x2700. */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
fed5eccd
AR
2059/**
2060 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2061 */
1ee6dd77
RB
2062static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2063 TxD *txdlp, int get_off)
fed5eccd 2064{
1ee6dd77 2065 struct s2io_nic *nic = fifo_data->nic;
fed5eccd 2066 struct sk_buff *skb;
1ee6dd77 2067 struct TxD *txds;
fed5eccd
AR
2068 u16 j, frg_cnt;
2069
2070 txds = txdlp;
26b7625c 2071 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
fed5eccd
AR
2072 pci_unmap_single(nic->pdev, (dma_addr_t)
2073 txds->Buffer_Pointer, sizeof(u64),
2074 PCI_DMA_TODEVICE);
2075 txds++;
2076 }
2077
2078 skb = (struct sk_buff *) ((unsigned long)
2079 txds->Host_Control);
2080 if (!skb) {
1ee6dd77 2081 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2082 return NULL;
2083 }
2084 pci_unmap_single(nic->pdev, (dma_addr_t)
2085 txds->Buffer_Pointer,
2086 skb->len - skb->data_len,
2087 PCI_DMA_TODEVICE);
2088 frg_cnt = skb_shinfo(skb)->nr_frags;
2089 if (frg_cnt) {
2090 txds++;
2091 for (j = 0; j < frg_cnt; j++, txds++) {
2092 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2093 if (!txds->Buffer_Pointer)
2094 break;
6aa20a22 2095 pci_unmap_page(nic->pdev, (dma_addr_t)
fed5eccd
AR
2096 txds->Buffer_Pointer,
2097 frag->size, PCI_DMA_TODEVICE);
2098 }
2099 }
1ee6dd77 2100 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2101 return(skb);
2102}
1da177e4 2103
20346722
K
2104/**
2105 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2106 * @nic : device private variable.
20346722 2107 * Description:
1da177e4 2108 * Free all queued Tx buffers.
20346722 2109 * Return Value: void
1da177e4
LT
2110*/
2111
2112static void free_tx_buffers(struct s2io_nic *nic)
2113{
2114 struct net_device *dev = nic->dev;
2115 struct sk_buff *skb;
1ee6dd77 2116 struct TxD *txdp;
1da177e4 2117 int i, j;
1ee6dd77 2118 struct mac_info *mac_control;
1da177e4 2119 struct config_param *config;
fed5eccd 2120 int cnt = 0;
1da177e4
LT
2121
2122 mac_control = &nic->mac_control;
2123 config = &nic->config;
2124
2125 for (i = 0; i < config->tx_fifo_num; i++) {
2126 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1ee6dd77 2127 txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
1da177e4 2128 list_virt_addr;
fed5eccd
AR
2129 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2130 if (skb) {
2131 dev_kfree_skb(skb);
2132 cnt++;
1da177e4 2133 }
1da177e4
LT
2134 }
2135 DBG_PRINT(INTR_DBG,
2136 "%s:forcibly freeing %d skbs on FIFO%d\n",
2137 dev->name, cnt, i);
20346722
K
2138 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2139 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1da177e4
LT
2140 }
2141}
2142
20346722
K
2143/**
2144 * stop_nic - To stop the nic
1da177e4 2145 * @nic ; device private variable.
20346722
K
2146 * Description:
2147 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2148 * function does. This function is called to stop the device.
2149 * Return Value:
2150 * void.
2151 */
2152
2153static void stop_nic(struct s2io_nic *nic)
2154{
1ee6dd77 2155 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2156 register u64 val64 = 0;
5d3213cc 2157 u16 interruptible;
1ee6dd77 2158 struct mac_info *mac_control;
1da177e4
LT
2159 struct config_param *config;
2160
2161 mac_control = &nic->mac_control;
2162 config = &nic->config;
2163
2164 /* Disable all interrupts */
e960fc5c 2165 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
a371a07d
K
2166 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2167 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1da177e4
LT
2168 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2169
5d3213cc
AR
2170 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2171 val64 = readq(&bar0->adapter_control);
2172 val64 &= ~(ADAPTER_CNTL_EN);
2173 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2174}
2175
1ee6dd77
RB
2176static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2177 sk_buff *skb)
da6971d8
AR
2178{
2179 struct net_device *dev = nic->dev;
2180 struct sk_buff *frag_list;
50eb8006 2181 void *tmp;
da6971d8
AR
2182
2183 /* Buffer-1 receives L3/L4 headers */
1ee6dd77 2184 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
da6971d8
AR
2185 (nic->pdev, skb->data, l3l4hdr_size + 4,
2186 PCI_DMA_FROMDEVICE);
2187
2188 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2189 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2190 if (skb_shinfo(skb)->frag_list == NULL) {
c53d4945 2191 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
0c61ed5f 2192 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
da6971d8
AR
2193 return -ENOMEM ;
2194 }
2195 frag_list = skb_shinfo(skb)->frag_list;
372cc597 2196 skb->truesize += frag_list->truesize;
da6971d8 2197 frag_list->next = NULL;
50eb8006
JG
2198 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2199 frag_list->data = tmp;
27a884dc 2200 skb_reset_tail_pointer(frag_list);
da6971d8
AR
2201
2202 /* Buffer-2 receives L4 data payload */
1ee6dd77 2203 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
da6971d8
AR
2204 frag_list->data, dev->mtu,
2205 PCI_DMA_FROMDEVICE);
2206 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2207 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2208
2209 return SUCCESS;
2210}
2211
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring_no: ring number
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. Five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 Header,
 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 * is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Number of descriptors that currently lack a buffer. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	/* Snapshot of the consumer (get) position, used below to detect
	 * the ring-full condition. */
	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
		    rx_blocks[block_no].rxds[off].virt_addr;

		/* Put index caught up with get index on an occupied
		 * descriptor: the ring is full, stop refilling. */
		if ((block_no == block_no1) && (off == off1) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of the current block reached: advance (with wrap)
		 * to the first descriptor of the next block. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
			    block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
				    block_index = 0;
			block_no = mac_control->rings[ring_no].
			    rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			rxdp = mac_control->rings[ring_no].
			    rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* Publish the absolute put position; in non-NAPI mode the
		 * interrupt path reads it concurrently, hence the lock. */
		if(!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* Descriptor still owned by the adapter (and, in multi-
		 * buffer modes, marked in-use via bit 0 of Control_2). */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((nic->rxd_mode >= RXD_MODE_3A) &&
		     (rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand the batch allocated so far to the NIC
			 * before bailing out. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			nic->mac_control.stats_info->sw_stat.
				mem_alloc_fail_cnt++;
			return -ENOMEM ;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provides 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation where in
			 * skb->data will have L3/L4 headers where as
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload
			 */

			/* save the buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
			Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr;
			((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* 128-byte align skb->data (ALIGN_SIZE is a mask). */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer-0: small pre-allocated area; map once and
			 * only dma-sync on subsequent refills. */
			if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
				((struct RxD3*)rxdp)->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				(dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be dummy buffer. Not used */
				if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
					((struct RxD3*)rxdp)->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
							RXD_OWN_XENA;
					}
					return -ENOMEM ;
				}
			}
			/* Bit 0 marks the descriptor as filled (checked in
			 * the ownership test above). */
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Within a sync batch, hand ownership straight to the NIC;
		 * batch leaders are deferred until the next wmb() below. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
2442
da6971d8
AR
2443static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2444{
2445 struct net_device *dev = sp->dev;
2446 int j;
2447 struct sk_buff *skb;
1ee6dd77
RB
2448 struct RxD_t *rxdp;
2449 struct mac_info *mac_control;
2450 struct buffAdd *ba;
da6971d8
AR
2451
2452 mac_control = &sp->mac_control;
2453 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2454 rxdp = mac_control->rings[ring_no].
2455 rx_blocks[blk].rxds[j].virt_addr;
2456 skb = (struct sk_buff *)
2457 ((unsigned long) rxdp->Host_Control);
2458 if (!skb) {
2459 continue;
2460 }
2461 if (sp->rxd_mode == RXD_MODE_1) {
2462 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2463 ((struct RxD1*)rxdp)->Buffer0_ptr,
da6971d8
AR
2464 dev->mtu +
2465 HEADER_ETHERNET_II_802_3_SIZE
2466 + HEADER_802_2_SIZE +
2467 HEADER_SNAP_SIZE,
2468 PCI_DMA_FROMDEVICE);
1ee6dd77 2469 memset(rxdp, 0, sizeof(struct RxD1));
da6971d8
AR
2470 } else if(sp->rxd_mode == RXD_MODE_3B) {
2471 ba = &mac_control->rings[ring_no].
2472 ba[blk][j];
2473 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2474 ((struct RxD3*)rxdp)->Buffer0_ptr,
da6971d8
AR
2475 BUF0_LEN,
2476 PCI_DMA_FROMDEVICE);
2477 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2478 ((struct RxD3*)rxdp)->Buffer1_ptr,
da6971d8
AR
2479 BUF1_LEN,
2480 PCI_DMA_FROMDEVICE);
2481 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2482 ((struct RxD3*)rxdp)->Buffer2_ptr,
da6971d8
AR
2483 dev->mtu + 4,
2484 PCI_DMA_FROMDEVICE);
1ee6dd77 2485 memset(rxdp, 0, sizeof(struct RxD3));
da6971d8
AR
2486 } else {
2487 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2488 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
da6971d8
AR
2489 PCI_DMA_FROMDEVICE);
2490 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2491 ((struct RxD3*)rxdp)->Buffer1_ptr,
da6971d8
AR
2492 l3l4hdr_size + 4,
2493 PCI_DMA_FROMDEVICE);
2494 pci_unmap_single(sp->pdev, (dma_addr_t)
1ee6dd77 2495 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
da6971d8 2496 PCI_DMA_FROMDEVICE);
1ee6dd77 2497 memset(rxdp, 0, sizeof(struct RxD3));
da6971d8
AR
2498 }
2499 dev_kfree_skb(skb);
2500 atomic_dec(&sp->rx_bufs_left[ring_no]);
2501 }
2502}
2503
1da177e4 2504/**
20346722 2505 * free_rx_buffers - Frees all Rx buffers
1da177e4 2506 * @sp: device private variable.
20346722 2507 * Description:
1da177e4
LT
2508 * This function will free all Rx buffers allocated by host.
2509 * Return Value:
2510 * NONE.
2511 */
2512
2513static void free_rx_buffers(struct s2io_nic *sp)
2514{
2515 struct net_device *dev = sp->dev;
da6971d8 2516 int i, blk = 0, buf_cnt = 0;
1ee6dd77 2517 struct mac_info *mac_control;
1da177e4 2518 struct config_param *config;
1da177e4
LT
2519
2520 mac_control = &sp->mac_control;
2521 config = &sp->config;
2522
2523 for (i = 0; i < config->rx_ring_num; i++) {
da6971d8
AR
2524 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2525 free_rxd_blk(sp,i,blk);
1da177e4 2526
20346722
K
2527 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2528 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2529 mac_control->rings[i].rx_curr_put_info.offset = 0;
2530 mac_control->rings[i].rx_curr_get_info.offset = 0;
1da177e4
LT
2531 atomic_set(&sp->rx_bufs_left[i], 0);
2532 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2533 dev->name, buf_cnt, i);
2534 }
2535}
2536
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are No Rx packets to be processed.
 */

static int s2io_poll(struct net_device *dev, int *budget)
{
	struct s2io_nic *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Process at most min(*budget, dev->quota) packets this pass
	 * (old-style NAPI interface). */
	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	/* Acknowledge pending Rx traffic interrupts; the readl flushes
	 * the posted write. */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		/* rx_intr_handler decrements pkts_to_process as it goes. */
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	/* Replenish the Rx descriptors consumed above. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	/* Budget exhausted: report work done but stay on the poll list
	 * (return 1) with interrupts still masked. */
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
20346722 2613
b41477f3 2614#ifdef CONFIG_NET_POLL_CONTROLLER
612eff0e 2615/**
b41477f3 2616 * s2io_netpoll - netpoll event handler entry point
612eff0e
BH
2617 * @dev : pointer to the device structure.
2618 * Description:
b41477f3
AR
2619 * This function will be called by upper layer to check for events on the
2620 * interface in situations where interrupts are disabled. It is used for
2621 * specific in-kernel networking tasks, such as remote consoles and kernel
2622 * debugging over the network (example netdump in RedHat).
612eff0e 2623 */
612eff0e
BH
2624static void s2io_netpoll(struct net_device *dev)
2625{
1ee6dd77
RB
2626 struct s2io_nic *nic = dev->priv;
2627 struct mac_info *mac_control;
612eff0e 2628 struct config_param *config;
1ee6dd77 2629 struct XENA_dev_config __iomem *bar0 = nic->bar0;
b41477f3 2630 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
612eff0e
BH
2631 int i;
2632
2633 disable_irq(dev->irq);
2634
2635 atomic_inc(&nic->isr_cnt);
2636 mac_control = &nic->mac_control;
2637 config = &nic->config;
2638
612eff0e 2639 writeq(val64, &bar0->rx_traffic_int);
b41477f3
AR
2640 writeq(val64, &bar0->tx_traffic_int);
2641
6aa20a22 2642 /* we need to free up the transmitted skbufs or else netpoll will
b41477f3
AR
2643 * run out of skbs and will fail and eventually netpoll application such
2644 * as netdump will fail.
2645 */
2646 for (i = 0; i < config->tx_fifo_num; i++)
2647 tx_intr_handler(&mac_control->fifos[i]);
612eff0e 2648
b41477f3 2649 /* check for received packet and indicate up to network */
612eff0e
BH
2650 for (i = 0; i < config->rx_ring_num; i++)
2651 rx_intr_handler(&mac_control->rings[i]);
2652
2653 for (i = 0; i < config->rx_ring_num; i++) {
2654 if (fill_rx_buffers(nic, i) == -ENOMEM) {
0c61ed5f
RV
2655 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2656 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
612eff0e
BH
2657 break;
2658 }
2659 }
2660 atomic_dec(&nic->isr_cnt);
2661 enable_irq(dev->irq);
2662 return;
2663}
2664#endif
2665
/**
 * rx_intr_handler - Rx interrupt handler
 * @ring_data: the ring to process.
 * Description:
 * If the interrupt is because of a received frame or if the
 * receive ring contains fresh as yet un-processed frames,this function is
 * called. It picks out the RxD at which place the last Rx processing had
 * stopped and sends the skb to the OSM's Rx handler and then increments
 * the offset.
 * Return Value:
 * NONE.
 */
static void rx_intr_handler(struct ring_info *ring_data)
{
	struct s2io_nic *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;

	spin_lock(&nic->rx_lock);
	/* Bail out early if the adapter is being reset. */
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	/* Resume from where the previous pass stopped (get index). */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	/* put_pos is updated concurrently by the refill path in non-NAPI
	 * mode, hence the lock in that case. */
	if (!napi) {
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;

	/* Walk descriptors until one is still owned by the adapter. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		/* Unmap the DMA buffers for this descriptor according to
		 * the active receive mode. */
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD1*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/* Buffer-0 stays mapped across refills; only sync
			 * it for CPU access. */
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr,
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
		} else {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
					 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 ((struct RxD3*)rxdp)->Buffer1_ptr,
					 l3l4hdr_size + 4,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 ((struct RxD3*)rxdp)->Buffer2_ptr,
					 dev->mtu, PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		/* End of block: wrap to the next block in the ring. */
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		/* In NAPI mode, stop once the poll quota is exhausted. */
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
20346722
K
2792
2793/**
1da177e4
LT
2794 * tx_intr_handler - Transmit interrupt handler
2795 * @nic : device private variable
20346722
K
2796 * Description:
2797 * If an interrupt was raised to indicate DMA complete of the
2798 * Tx packet, this function is called. It identifies the last TxD
2799 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
2800 * DMA'ed into the NICs internal memory.
2801 * Return Value:
2802 * NONE
2803 */
2804
1ee6dd77 2805static void tx_intr_handler(struct fifo_info *fifo_data)
1da177e4 2806{
1ee6dd77 2807 struct s2io_nic *nic = fifo_data->nic;
1da177e4 2808 struct net_device *dev = (struct net_device *) nic->dev;
1ee6dd77 2809 struct tx_curr_get_info get_info, put_info;
1da177e4 2810 struct sk_buff *skb;
1ee6dd77 2811 struct TxD *txdlp;
1da177e4 2812
20346722 2813 get_info = fifo_data->tx_curr_get_info;
1ee6dd77
RB
2814 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2815 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
20346722
K
2816 list_virt_addr;
2817 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2818 (get_info.offset != put_info.offset) &&
2819 (txdlp->Host_Control)) {
2820 /* Check for TxD errors */
2821 if (txdlp->Control_1 & TXD_T_CODE) {
2822 unsigned long long err;
2823 err = txdlp->Control_1 & TXD_T_CODE;
bd1034f0
AR
2824 if (err & 0x1) {
2825 nic->mac_control.stats_info->sw_stat.
2826 parity_err_cnt++;
2827 }
776bd20f 2828 if ((err >> 48) == 0xA) {
2829 DBG_PRINT(TX_DBG, "TxD returned due \
19a60522 2830 to loss of link\n");
776bd20f 2831 }
2832 else {
19a60522 2833 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
776bd20f 2834 }
20346722 2835 }
1da177e4 2836
fed5eccd 2837 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
20346722
K
2838 if (skb == NULL) {
2839 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2840 __FUNCTION__);
2841 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2842 return;
2843 }
2844
20346722 2845 /* Updating the statistics block */
20346722
K
2846 nic->stats.tx_bytes += skb->len;
2847 dev_kfree_skb_irq(skb);
2848
2849 get_info.offset++;
863c11a9
AR
2850 if (get_info.offset == get_info.fifo_len + 1)
2851 get_info.offset = 0;
1ee6dd77 2852 txdlp = (struct TxD *) fifo_data->list_info
20346722
K
2853 [get_info.offset].list_virt_addr;
2854 fifo_data->tx_curr_get_info.offset =
2855 get_info.offset;
1da177e4
LT
2856 }
2857
2858 spin_lock(&nic->tx_lock);
2859 if (netif_queue_stopped(dev))
2860 netif_wake_queue(dev);
2861 spin_unlock(&nic->tx_lock);
2862}
2863
bd1034f0
AR
2864/**
2865 * s2io_mdio_write - Function to write in to MDIO registers
2866 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2867 * @addr : address value
2868 * @value : data value
2869 * @dev : pointer to net_device structure
2870 * Description:
2871 * This function is used to write values to the MDIO registers
2872 * NONE
2873 */
2874static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2875{
2876 u64 val64 = 0x0;
1ee6dd77
RB
2877 struct s2io_nic *sp = dev->priv;
2878 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
2879
2880 //address transaction
2881 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2882 | MDIO_MMD_DEV_ADDR(mmd_type)
2883 | MDIO_MMS_PRT_ADDR(0x0);
2884 writeq(val64, &bar0->mdio_control);
2885 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2886 writeq(val64, &bar0->mdio_control);
2887 udelay(100);
2888
2889 //Data transaction
2890 val64 = 0x0;
2891 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2892 | MDIO_MMD_DEV_ADDR(mmd_type)
2893 | MDIO_MMS_PRT_ADDR(0x0)
2894 | MDIO_MDIO_DATA(value)
2895 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2896 writeq(val64, &bar0->mdio_control);
2897 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2898 writeq(val64, &bar0->mdio_control);
2899 udelay(100);
2900
2901 val64 = 0x0;
2902 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2903 | MDIO_MMD_DEV_ADDR(mmd_type)
2904 | MDIO_MMS_PRT_ADDR(0x0)
2905 | MDIO_OP(MDIO_OP_READ_TRANS);
2906 writeq(val64, &bar0->mdio_control);
2907 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2908 writeq(val64, &bar0->mdio_control);
2909 udelay(100);
2910
2911}
2912
2913/**
2914 * s2io_mdio_read - Function to write in to MDIO registers
2915 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2916 * @addr : address value
2917 * @dev : pointer to net_device structure
2918 * Description:
2919 * This function is used to read values to the MDIO registers
2920 * NONE
2921 */
2922static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2923{
2924 u64 val64 = 0x0;
2925 u64 rval64 = 0x0;
1ee6dd77
RB
2926 struct s2io_nic *sp = dev->priv;
2927 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
2928
2929 /* address transaction */
2930 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2931 | MDIO_MMD_DEV_ADDR(mmd_type)
2932 | MDIO_MMS_PRT_ADDR(0x0);
2933 writeq(val64, &bar0->mdio_control);
2934 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2935 writeq(val64, &bar0->mdio_control);
2936 udelay(100);
2937
2938 /* Data transaction */
2939 val64 = 0x0;
2940 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2941 | MDIO_MMD_DEV_ADDR(mmd_type)
2942 | MDIO_MMS_PRT_ADDR(0x0)
2943 | MDIO_OP(MDIO_OP_READ_TRANS);
2944 writeq(val64, &bar0->mdio_control);
2945 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2946 writeq(val64, &bar0->mdio_control);
2947 udelay(100);
2948
2949 /* Read the value from regs */
2950 rval64 = readq(&bar0->mdio_control);
2951 rval64 = rval64 & 0xFFFF0000;
2952 rval64 = rval64 >> 16;
2953 return rval64;
2954}
2955/**
2956 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2957 * @counter : couter value to be updated
2958 * @flag : flag to indicate the status
2959 * @type : counter type
2960 * Description:
2961 * This function is to check the status of the xpak counters value
2962 * NONE
2963 */
2964
2965static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2966{
2967 u64 mask = 0x3;
2968 u64 val64;
2969 int i;
2970 for(i = 0; i <index; i++)
2971 mask = mask << 0x2;
2972
2973 if(flag > 0)
2974 {
2975 *counter = *counter + 1;
2976 val64 = *regs_stat & mask;
2977 val64 = val64 >> (index * 0x2);
2978 val64 = val64 + 1;
2979 if(val64 == 3)
2980 {
2981 switch(type)
2982 {
2983 case 1:
2984 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2985 "service. Excessive temperatures may "
2986 "result in premature transceiver "
2987 "failure \n");
2988 break;
2989 case 2:
2990 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2991 "service Excessive bias currents may "
2992 "indicate imminent laser diode "
2993 "failure \n");
2994 break;
2995 case 3:
2996 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2997 "service Excessive laser output "
2998 "power may saturate far-end "
2999 "receiver\n");
3000 break;
3001 default:
3002 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3003 "type \n");
3004 }
3005 val64 = 0x0;
3006 }
3007 val64 = val64 << (index * 0x2);
3008 *regs_stat = (*regs_stat & (~mask)) | (val64);
3009
3010 } else {
3011 *regs_stat = *regs_stat & (~mask);
3012 }
3013}
3014
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev : pointer to net_device struct
 *  Description:
 *  This function is to update the status of the xpak counters value
 *  by reading the XPAK alarm and warning flag registers over MDIO
 *  and bumping the matching counters in the driver's stat block.
 *  Return value:
 *  NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave: all-ones or
	 * all-zeros means the transceiver did not respond.
	 */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register.
	 * NOTE(review): 0xA100/0xA070/0xA074 appear to be XPAK MSA DOM
	 * control, alarm-flag and warning-flag registers - confirm
	 * against the XPAK MSA specification.
	 */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm (type 1) */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (type 2) */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (type 3) */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags; warnings only bump counters and
	 * are not tracked in xpak_regs_stat.
	 */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3113
/**
 *  alarm_intr_handler - Alarm Interrupt handler
 *  @nic: device private variable
 *  Description: If the interrupt was neither because of Rx packet or Tx
 *  complete, this function is called. If the interrupt was to indicate
 *  a loss of link, the OSM link status handler is invoked for any other
 *  alarm interrupt the block that raised the interrupt is displayed
 *  and a H/W reset is issued.
 *  Return Value:
 *  NONE
*/

static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;
	u64 cnt;
	int i;
	/* Nothing to do if the adapter has already been taken down */
	if (atomic_read(&nic->card_state) == CARD_DOWN)
		return;
	/* ring_full_cnt is recomputed from the hardware bump counters
	 * at the bottom of this handler, so zero it here first.
	 */
	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
	/* Handling the XPAK counters update */
	if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
		/* waiting for an hour: 72000 invocations of this handler.
		 * NOTE(review): assumes the alarm fires every ~50ms -
		 * confirm against the alarm timer configuration.
		 */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
	}

	/* Handling link status change error Intr: read-then-write-back
	 * clears the asserted bits (write-1-to-clear register).
	 */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
					nic->mac_control.stats_info->sw_stat.
						soft_reset_cnt++;
				}
			}
		} else {
			/* Single-bit ECC errors are corrected by the HW;
			 * just count them.
			 */
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		/* read back to flush; set_link_task re-enables later */
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}
	/* Check for data parity error */
	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
			schedule_work(&nic->rst_timer_task);
			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
		}
	}

	/* Check for ring full counter: each 64-bit bump counter packs
	 * four 16-bit per-ring counts; accumulate all eight rings.
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}
	}

	/* Other type of interrupts are not being handled now,  TODO */
}
3240
20346722 3241/**
1da177e4 3242 * wait_for_cmd_complete - waits for a command to complete.
20346722 3243 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3244 * s2io_nic structure.
20346722
K
3245 * Description: Function that waits for a command to Write into RMAC
3246 * ADDR DATA registers to be completed and returns either success or
3247 * error depending on whether the command was complete or not.
1da177e4
LT
3248 * Return value:
3249 * SUCCESS on success and FAILURE on failure.
3250 */
3251
9fc93a41
SS
3252static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3253 int bit_state)
1da177e4 3254{
9fc93a41 3255 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3256 u64 val64;
3257
9fc93a41
SS
3258 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3259 return FAILURE;
3260
3261 do {
c92ca04b 3262 val64 = readq(addr);
9fc93a41
SS
3263 if (bit_state == S2IO_BIT_RESET) {
3264 if (!(val64 & busy_bit)) {
3265 ret = SUCCESS;
3266 break;
3267 }
3268 } else {
3269 if (!(val64 & busy_bit)) {
3270 ret = SUCCESS;
3271 break;
3272 }
1da177e4 3273 }
c92ca04b
AR
3274
3275 if(in_interrupt())
9fc93a41 3276 mdelay(delay);
c92ca04b 3277 else
9fc93a41 3278 msleep(delay);
c92ca04b 3279
9fc93a41
SS
3280 if (++cnt >= 10)
3281 delay = 50;
3282 } while (cnt < 20);
1da177e4
LT
3283 return ret;
3284}
19a60522
SS
3285/*
3286 * check_pci_device_id - Checks if the device id is supported
3287 * @id : device id
3288 * Description: Function to check if the pci device id is supported by driver.
3289 * Return value: Actual device id if supported else PCI_ANY_ID
3290 */
3291static u16 check_pci_device_id(u16 id)
3292{
3293 switch (id) {
3294 case PCI_DEVICE_ID_HERC_WIN:
3295 case PCI_DEVICE_ID_HERC_UNI:
3296 return XFRAME_II_DEVICE;
3297 case PCI_DEVICE_ID_S2IO_UNI:
3298 case PCI_DEVICE_ID_S2IO_WIN:
3299 return XFRAME_I_DEVICE;
3300 default:
3301 return PCI_ANY_ID;
3302 }
3303}
1da177e4 3304
/**
 *  s2io_reset - Resets the card.
 *  @sp : private member of the device structure.
 *  Description: Function to Reset the card. This function then also
 *  restores the previously saved PCI configuration space registers as
 *  the card reset also resets the configuration space.
 *  Return value:
 *  void.
 */

static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long reset_cnt = 0;
	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Xframe II can be reset via a PME D3->D0 power-state cycle;
	 * fall back to the SW_RESET register if that fails.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		int ret;
		ret = pci_set_power_state(sp->pdev, 3);
		if (!ret)
			ret = pci_set_power_state(sp->pdev, 0);
		else {
			DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
					__FUNCTION__);
			goto old_way;
		}
		msleep(20);
		goto new_way;
	}
old_way:
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
new_way:
	/* CX4 transceivers need extra settle time after reset */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/* Re-read the device id until config space is accessible again */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	/* Put back the saved PCI-X command register */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));
	/* save reset count: it must survive the stat-block wipe below */
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore reset count */
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* NOTE(review): 0x2700 is an undocumented register offset
		 * used only by this SXE-002 workaround - confirm against
		 * the Xframe errata before touching.
		 */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	/* restore the previously assigned mac address */
	s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

	sp->device_enabled_once = FALSE;
}
3419
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Read path needs fixing: try each FE/SE combination
		 * until the feedback register reads back correctly.
		 */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
				0x8100008181000081ULL,	/* FE=1, SE=0 */
				0x4200004242000042ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				  (unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Verify the write path using the xmsi_address scratch register */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
				0x0081810000818100ULL,	/* FE=1, SE=0 */
				0x0042420000424200ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the established FE/SE bits; the rest is rebuilt below */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3557
1ee6dd77 3558static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3559{
1ee6dd77 3560 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3561 u64 val64;
3562 int ret = 0, cnt = 0;
3563
3564 do {
3565 val64 = readq(&bar0->xmsi_access);
3566 if (!(val64 & BIT(15)))
3567 break;
3568 mdelay(1);
3569 cnt++;
3570 } while(cnt < 5);
3571 if (cnt == 5) {
3572 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3573 ret = 1;
3574 }
3575
3576 return ret;
3577}
3578
1ee6dd77 3579static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3580{
1ee6dd77 3581 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3582 u64 val64;
3583 int i;
3584
75c30b13 3585 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
cc6e7c44
RA
3586 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3587 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3588 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3589 writeq(val64, &bar0->xmsi_access);
3590 if (wait_for_msix_trans(nic, i)) {
3591 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3592 continue;
3593 }
3594 }
3595}
3596
1ee6dd77 3597static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3598{
1ee6dd77 3599 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3600 u64 val64, addr, data;
3601 int i;
3602
3603 /* Store and display */
75c30b13 3604 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
cc6e7c44
RA
3605 val64 = (BIT(15) | vBIT(i, 26, 6));
3606 writeq(val64, &bar0->xmsi_access);
3607 if (wait_for_msix_trans(nic, i)) {
3608 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3609 continue;
3610 }
3611 addr = readq(&bar0->xmsi_address);
3612 data = readq(&bar0->xmsi_data);
3613 if (addr && data) {
3614 nic->msix_info[i].addr = addr;
3615 nic->msix_info[i].data = data;
3616 }
3617 }
3618}
3619
1ee6dd77 3620int s2io_enable_msi(struct s2io_nic *nic)
cc6e7c44 3621{
1ee6dd77 3622 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3623 u16 msi_ctrl, msg_val;
3624 struct config_param *config = &nic->config;
3625 struct net_device *dev = nic->dev;
3626 u64 val64, tx_mat, rx_mat;
3627 int i, err;
3628
3629 val64 = readq(&bar0->pic_control);
3630 val64 &= ~BIT(1);
3631 writeq(val64, &bar0->pic_control);
3632
3633 err = pci_enable_msi(nic->pdev);
3634 if (err) {
3635 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3636 nic->dev->name);
3637 return err;
3638 }
3639
3640 /*
3641 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3642 * for interrupt handling.
3643 */
3644 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3645 msg_val ^= 0x1;
3646 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3647 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3648
3649 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3650 msi_ctrl |= 0x10;
3651 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3652
3653 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3654 tx_mat = readq(&bar0->tx_mat0_n[0]);
3655 for (i=0; i<config->tx_fifo_num; i++) {
3656 tx_mat |= TX_MAT_SET(i, 1);
3657 }
3658 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3659
3660 rx_mat = readq(&bar0->rx_mat);
3661 for (i=0; i<config->rx_ring_num; i++) {
3662 rx_mat |= RX_MAT_SET(i, 1);
3663 }
3664 writeq(rx_mat, &bar0->rx_mat);
3665
3666 dev->irq = nic->pdev->irq;
3667 return 0;
3668}
3669
1ee6dd77 3670static int s2io_enable_msi_x(struct s2io_nic *nic)
cc6e7c44 3671{
1ee6dd77 3672 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3673 u64 tx_mat, rx_mat;
3674 u16 msi_control; /* Temp variable */
3675 int ret, i, j, msix_indx = 1;
3676
3677 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3678 GFP_KERNEL);
3679 if (nic->entries == NULL) {
0c61ed5f 3680 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
c53d4945 3681 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
cc6e7c44
RA
3682 return -ENOMEM;
3683 }
3684 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3685
3686 nic->s2io_entries =
3687 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3688 GFP_KERNEL);
3689 if (nic->s2io_entries == NULL) {
0c61ed5f 3690 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
c53d4945 3691 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
cc6e7c44
RA
3692 kfree(nic->entries);
3693 return -ENOMEM;
3694 }
3695 memset(nic->s2io_entries, 0,
3696 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3697
3698 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3699 nic->entries[i].entry = i;
3700 nic->s2io_entries[i].entry = i;
3701 nic->s2io_entries[i].arg = NULL;
3702 nic->s2io_entries[i].in_use = 0;
3703 }
3704
3705 tx_mat = readq(&bar0->tx_mat0_n[0]);
3706 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3707 tx_mat |= TX_MAT_SET(i, msix_indx);
3708 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3709 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3710 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3711 }
3712 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3713
3714 if (!nic->config.bimodal) {
3715 rx_mat = readq(&bar0->rx_mat);
3716 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3717 rx_mat |= RX_MAT_SET(j, msix_indx);
3718 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3719 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3720 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3721 }
3722 writeq(rx_mat, &bar0->rx_mat);
3723 } else {
3724 tx_mat = readq(&bar0->tx_mat0_n[7]);
3725 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3726 tx_mat |= TX_MAT_SET(i, msix_indx);
3727 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3728 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3729 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3730 }
3731 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3732 }
3733
c92ca04b 3734 nic->avail_msix_vectors = 0;
cc6e7c44 3735 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
c92ca04b
AR
3736 /* We fail init if error or we get less vectors than min required */
3737 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3738 nic->avail_msix_vectors = ret;
3739 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3740 }
cc6e7c44
RA
3741 if (ret) {
3742 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3743 kfree(nic->entries);
3744 kfree(nic->s2io_entries);
3745 nic->entries = NULL;
3746 nic->s2io_entries = NULL;
c92ca04b 3747 nic->avail_msix_vectors = 0;
cc6e7c44
RA
3748 return -ENOMEM;
3749 }
c92ca04b
AR
3750 if (!nic->avail_msix_vectors)
3751 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
cc6e7c44
RA
3752
3753 /*
3754 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3755 * in the herc NIC. (Temp change, needs to be removed later)
3756 */
3757 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3758 msi_control |= 0x1; /* Enable MSI */
3759 pci_write_config_word(nic->pdev, 0x42, msi_control);
3760
3761 return 0;
3762}
3763
1da177e4
LT
3764/* ********************************************************* *
3765 * Functions defined below concern the OS part of the driver *
3766 * ********************************************************* */
3767
20346722 3768/**
1da177e4
LT
3769 * s2io_open - open entry point of the driver
3770 * @dev : pointer to the device structure.
3771 * Description:
3772 * This function is the open entry point of the driver. It mainly calls a
3773 * function to allocate Rx buffers and inserts them into the buffer
20346722 3774 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3775 * Return value:
3776 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3777 * file on failure.
3778 */
3779
ac1f60db 3780static int s2io_open(struct net_device *dev)
1da177e4 3781{
1ee6dd77 3782 struct s2io_nic *sp = dev->priv;
1da177e4
LT
3783 int err = 0;
3784
20346722
K
3785 /*
3786 * Make sure you have link off by default every time
1da177e4
LT
3787 * Nic is initialized
3788 */
3789 netif_carrier_off(dev);
0b1f7ebe 3790 sp->last_link_state = 0;
1da177e4
LT
3791
3792 /* Initialize H/W and enable interrupts */
c92ca04b
AR
3793 err = s2io_card_up(sp);
3794 if (err) {
1da177e4
LT
3795 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3796 dev->name);
e6a8fee2 3797 goto hw_init_failed;
1da177e4
LT
3798 }
3799
3800 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3801 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
e6a8fee2 3802 s2io_card_down(sp);
20346722 3803 err = -ENODEV;
e6a8fee2 3804 goto hw_init_failed;
1da177e4
LT
3805 }
3806
3807 netif_start_queue(dev);
3808 return 0;
20346722 3809
20346722 3810hw_init_failed:
cc6e7c44
RA
3811 if (sp->intr_type == MSI_X) {
3812 if (sp->entries)
3813 kfree(sp->entries);
3814 if (sp->s2io_entries)
3815 kfree(sp->s2io_entries);
3816 }
20346722 3817 return err;
1da177e4
LT
3818}
3819
3820/**
3821 * s2io_close -close entry point of the driver
3822 * @dev : device pointer.
3823 * Description:
3824 * This is the stop entry point of the driver. It needs to undo exactly
3825 * whatever was done by the open entry point,thus it's usually referred to
3826 * as the close function.Among other things this function mainly stops the
3827 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3828 * Return value:
3829 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3830 * file on failure.
3831 */
3832
ac1f60db 3833static int s2io_close(struct net_device *dev)
1da177e4 3834{
1ee6dd77 3835 struct s2io_nic *sp = dev->priv;
cc6e7c44 3836
1da177e4
LT
3837 netif_stop_queue(dev);
3838 /* Reset card, kill tasklet and free Tx and Rx buffers. */
e6a8fee2 3839 s2io_card_down(sp);
cc6e7c44 3840
1da177e4
LT
3841 sp->device_close_flag = TRUE; /* Device is shut down. */
3842 return 0;
3843}
3844
3845/**
3846 * s2io_xmit - Tx entry point of te driver
3847 * @skb : the socket buffer containing the Tx data.
3848 * @dev : device pointer.
3849 * Description :
3850 * This function is the Tx entry point of the driver. S2IO NIC supports
3851 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3852 * NOTE: when device cant queue the pkt,just the trans_start variable will
3853 * not be upadted.
3854 * Return value:
3855 * 0 on success & 1 on failure.
3856 */
3857
ac1f60db 3858static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 3859{
1ee6dd77 3860 struct s2io_nic *sp = dev->priv;
1da177e4
LT
3861 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3862 register u64 val64;
1ee6dd77
RB
3863 struct TxD *txdp;
3864 struct TxFIFO_element __iomem *tx_fifo;
1da177e4 3865 unsigned long flags;
be3a6b02
K
3866 u16 vlan_tag = 0;
3867 int vlan_priority = 0;
1ee6dd77 3868 struct mac_info *mac_control;
1da177e4 3869 struct config_param *config;
75c30b13 3870 int offload_type;
1da177e4
LT
3871
3872 mac_control = &sp->mac_control;
3873 config = &sp->config;
3874
20346722 3875 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3876 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3877 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3878 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4
LT
3879 dev->name);
3880 spin_unlock_irqrestore(&sp->tx_lock, flags);
20346722
K
3881 dev_kfree_skb(skb);
3882 return 0;
1da177e4
LT
3883 }
3884
3885 queue = 0;
1da177e4 3886
be3a6b02
K
3887 /* Get Fifo number to Transmit based on vlan priority */
3888 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3889 vlan_tag = vlan_tx_tag_get(skb);
3890 vlan_priority = vlan_tag >> 13;
3891 queue = config->fifo_mapping[vlan_priority];
3892 }
3893
20346722
K
3894 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3895 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
1ee6dd77 3896 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
20346722
K
3897 list_virt_addr;
3898
3899 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
1da177e4 3900 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9
AR
3901 if (txdp->Host_Control ||
3902 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
776bd20f 3903 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
1da177e4
LT
3904 netif_stop_queue(dev);
3905 dev_kfree_skb(skb);
3906 spin_unlock_irqrestore(&sp->tx_lock, flags);
3907 return 0;
3908 }
0b1f7ebe
K
3909
3910 /* A buffer with no data will be dropped */
3911 if (!skb->len) {
3912 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3913 dev_kfree_skb(skb);
3914 spin_unlock_irqrestore(&sp->tx_lock, flags);
3915 return 0;
3916 }
3917
75c30b13 3918 offload_type = s2io_offload_type(skb);
75c30b13 3919 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1da177e4 3920 txdp->Control_1 |= TXD_TCP_LSO_EN;
75c30b13 3921 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
1da177e4 3922 }
84fa7933 3923 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4
LT
3924 txdp->Control_2 |=
3925 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3926 TXD_TX_CKO_UDP_EN);
3927 }
fed5eccd
AR
3928 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3929 txdp->Control_1 |= TXD_LIST_OWN_XENA;
1da177e4 3930 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3931
be3a6b02
K
3932 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3933 txdp->Control_2 |= TXD_VLAN_ENABLE;
3934 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3935 }
3936
fed5eccd 3937 frg_len = skb->len - skb->data_len;
75c30b13 3938 if (offload_type == SKB_GSO_UDP) {
fed5eccd
AR
3939 int ufo_size;
3940
75c30b13 3941 ufo_size = s2io_udp_mss(skb);
fed5eccd
AR
3942 ufo_size &= ~7;
3943 txdp->Control_1 |= TXD_UFO_EN;
3944 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3945 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3946#ifdef __BIG_ENDIAN
3947 sp->ufo_in_band_v[put_off] =
3948 (u64)skb_shinfo(skb)->ip6_frag_id;
3949#else
3950 sp->ufo_in_band_v[put_off] =
3951 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3952#endif
3953 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3954 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3955 sp->ufo_in_band_v,
3956 sizeof(u64), PCI_DMA_TODEVICE);
3957 txdp++;
fed5eccd 3958 }
1da177e4 3959
fed5eccd
AR
3960 txdp->Buffer_Pointer = pci_map_single
3961 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3962 txdp->Host_Control = (unsigned long) skb;
3963 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
75c30b13 3964 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
3965 txdp->Control_1 |= TXD_UFO_EN;
3966
3967 frg_cnt = skb_shinfo(skb)->nr_frags;
1da177e4
LT
3968 /* For fragmented SKB. */
3969 for (i = 0; i < frg_cnt; i++) {
3970 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe
K
3971 /* A '0' length fragment will be ignored */
3972 if (!frag->size)
3973 continue;
1da177e4
LT
3974 txdp++;
3975 txdp->Buffer_Pointer = (u64) pci_map_page
3976 (sp->pdev, frag->page, frag->page_offset,
3977 frag->size, PCI_DMA_TODEVICE);
efd51b5c 3978 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
75c30b13 3979 if (offload_type == SKB_GSO_UDP)
fed5eccd 3980 txdp->Control_1 |= TXD_UFO_EN;
1da177e4
LT
3981 }
3982 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3983
75c30b13 3984 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
3985 frg_cnt++; /* as Txd0 was used for inband header */
3986
1da177e4 3987 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3988 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
1da177e4
LT
3989 writeq(val64, &tx_fifo->TxDL_Pointer);
3990
3991 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3992 TX_FIFO_LAST_LIST);
75c30b13 3993 if (offload_type)
fed5eccd 3994 val64 |= TX_FIFO_SPECIAL_FUNC;
75c30b13 3995
1da177e4
LT
3996 writeq(val64, &tx_fifo->List_Control);
3997
303bcb4b
K
3998 mmiowb();
3999
1da177e4 4000 put_off++;
863c11a9
AR
4001 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4002 put_off = 0;
20346722 4003 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
1da177e4
LT
4004
4005 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 4006 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
bd1034f0 4007 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
1da177e4
LT
4008 DBG_PRINT(TX_DBG,
4009 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4010 put_off, get_off);
4011 netif_stop_queue(dev);
4012 }
4013
4014 dev->trans_start = jiffies;
4015 spin_unlock_irqrestore(&sp->tx_lock, flags);
4016
4017 return 0;
4018}
4019
25fff88e
K
4020static void
4021s2io_alarm_handle(unsigned long data)
4022{
1ee6dd77 4023 struct s2io_nic *sp = (struct s2io_nic *)data;
25fff88e
K
4024
4025 alarm_intr_handler(sp);
4026 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4027}
4028
1ee6dd77 4029static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
75c30b13
AR
4030{
4031 int rxb_size, level;
4032
4033 if (!sp->lro) {
4034 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4035 level = rx_buffer_level(sp, rxb_size, rng_n);
4036
4037 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4038 int ret;
4039 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4040 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4041 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
0c61ed5f 4042 DBG_PRINT(INFO_DBG, "Out of memory in %s",
75c30b13
AR
4043 __FUNCTION__);
4044 clear_bit(0, (&sp->tasklet_status));
4045 return -1;
4046 }
4047 clear_bit(0, (&sp->tasklet_status));
4048 } else if (level == LOW)
4049 tasklet_schedule(&sp->task);
4050
4051 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
0c61ed5f
RV
4052 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4053 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
75c30b13
AR
4054 }
4055 return 0;
4056}
4057
7d12e780 4058static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
cc6e7c44
RA
4059{
4060 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77 4061 struct s2io_nic *sp = dev->priv;
cc6e7c44 4062 int i;
1ee6dd77 4063 struct mac_info *mac_control;
cc6e7c44
RA
4064 struct config_param *config;
4065
4066 atomic_inc(&sp->isr_cnt);
4067 mac_control = &sp->mac_control;
4068 config = &sp->config;
4069 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4070
4071 /* If Intr is because of Rx Traffic */
4072 for (i = 0; i < config->rx_ring_num; i++)
4073 rx_intr_handler(&mac_control->rings[i]);
4074
4075 /* If Intr is because of Tx Traffic */
4076 for (i = 0; i < config->tx_fifo_num; i++)
4077 tx_intr_handler(&mac_control->fifos[i]);
4078
4079 /*
4080 * If the Rx buffer count is below the panic threshold then
4081 * reallocate the buffers from the interrupt handler itself,
4082 * else schedule a tasklet to reallocate the buffers.
4083 */
75c30b13
AR
4084 for (i = 0; i < config->rx_ring_num; i++)
4085 s2io_chk_rx_buffers(sp, i);
cc6e7c44
RA
4086
4087 atomic_dec(&sp->isr_cnt);
4088 return IRQ_HANDLED;
4089}
4090
7d12e780 4091static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
cc6e7c44 4092{
1ee6dd77
RB
4093 struct ring_info *ring = (struct ring_info *)dev_id;
4094 struct s2io_nic *sp = ring->nic;
cc6e7c44
RA
4095
4096 atomic_inc(&sp->isr_cnt);
cc6e7c44 4097
75c30b13
AR
4098 rx_intr_handler(ring);
4099 s2io_chk_rx_buffers(sp, ring->ring_no);
7d3d0439 4100
cc6e7c44 4101 atomic_dec(&sp->isr_cnt);
cc6e7c44
RA
4102 return IRQ_HANDLED;
4103}
4104
7d12e780 4105static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
cc6e7c44 4106{
1ee6dd77
RB
4107 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4108 struct s2io_nic *sp = fifo->nic;
cc6e7c44
RA
4109
4110 atomic_inc(&sp->isr_cnt);
4111 tx_intr_handler(fifo);
4112 atomic_dec(&sp->isr_cnt);
4113 return IRQ_HANDLED;
4114}
1ee6dd77 4115static void s2io_txpic_intr_handle(struct s2io_nic *sp)
a371a07d 4116{
1ee6dd77 4117 struct XENA_dev_config __iomem *bar0 = sp->bar0;
a371a07d
K
4118 u64 val64;
4119
4120 val64 = readq(&bar0->pic_int_status);
4121 if (val64 & PIC_INT_GPIO) {
4122 val64 = readq(&bar0->gpio_int_reg);
4123 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4124 (val64 & GPIO_INT_REG_LINK_UP)) {
c92ca04b
AR
4125 /*
4126 * This is unstable state so clear both up/down
4127 * interrupt and adapter to re-evaluate the link state.
4128 */
a371a07d
K
4129 val64 |= GPIO_INT_REG_LINK_DOWN;
4130 val64 |= GPIO_INT_REG_LINK_UP;
4131 writeq(val64, &bar0->gpio_int_reg);
a371a07d 4132 val64 = readq(&bar0->gpio_int_mask);
c92ca04b
AR
4133 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4134 GPIO_INT_MASK_LINK_DOWN);
a371a07d 4135 writeq(val64, &bar0->gpio_int_mask);
a371a07d 4136 }
c92ca04b
AR
4137 else if (val64 & GPIO_INT_REG_LINK_UP) {
4138 val64 = readq(&bar0->adapter_status);
c92ca04b 4139 /* Enable Adapter */
19a60522
SS
4140 val64 = readq(&bar0->adapter_control);
4141 val64 |= ADAPTER_CNTL_EN;
4142 writeq(val64, &bar0->adapter_control);
4143 val64 |= ADAPTER_LED_ON;
4144 writeq(val64, &bar0->adapter_control);
4145 if (!sp->device_enabled_once)
4146 sp->device_enabled_once = 1;
c92ca04b 4147
19a60522
SS
4148 s2io_link(sp, LINK_UP);
4149 /*
4150 * unmask link down interrupt and mask link-up
4151 * intr
4152 */
4153 val64 = readq(&bar0->gpio_int_mask);
4154 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4155 val64 |= GPIO_INT_MASK_LINK_UP;
4156 writeq(val64, &bar0->gpio_int_mask);
c92ca04b 4157
c92ca04b
AR
4158 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4159 val64 = readq(&bar0->adapter_status);
19a60522
SS
4160 s2io_link(sp, LINK_DOWN);
4161 /* Link is down so unmaks link up interrupt */
4162 val64 = readq(&bar0->gpio_int_mask);
4163 val64 &= ~GPIO_INT_MASK_LINK_UP;
4164 val64 |= GPIO_INT_MASK_LINK_DOWN;
4165 writeq(val64, &bar0->gpio_int_mask);
ac1f90d6
SS
4166
4167 /* turn off LED */
4168 val64 = readq(&bar0->adapter_control);
4169 val64 = val64 &(~ADAPTER_LED_ON);
4170 writeq(val64, &bar0->adapter_control);
a371a07d
K
4171 }
4172 }
c92ca04b 4173 val64 = readq(&bar0->gpio_int_mask);
a371a07d
K
4174}
4175
1da177e4
LT
4176/**
4177 * s2io_isr - ISR handler of the device .
4178 * @irq: the irq of the device.
4179 * @dev_id: a void pointer to the dev structure of the NIC.
20346722
K
4180 * Description: This function is the ISR handler of the device. It
4181 * identifies the reason for the interrupt and calls the relevant
4182 * service routines. As a contongency measure, this ISR allocates the
1da177e4
LT
4183 * recv buffers, if their numbers are below the panic value which is
4184 * presently set to 25% of the original number of rcv buffers allocated.
4185 * Return value:
20346722 4186 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
4187 * IRQ_NONE: will be returned if interrupt is not from our device
4188 */
7d12e780 4189static irqreturn_t s2io_isr(int irq, void *dev_id)
1da177e4
LT
4190{
4191 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77
RB
4192 struct s2io_nic *sp = dev->priv;
4193 struct XENA_dev_config __iomem *bar0 = sp->bar0;
20346722 4194 int i;
19a60522 4195 u64 reason = 0;
1ee6dd77 4196 struct mac_info *mac_control;
1da177e4
LT
4197 struct config_param *config;
4198
7ba013ac 4199 atomic_inc(&sp->isr_cnt);
1da177e4
LT
4200 mac_control = &sp->mac_control;
4201 config = &sp->config;
4202
20346722 4203 /*
1da177e4
LT
4204 * Identify the cause for interrupt and call the appropriate
4205 * interrupt handler. Causes for the interrupt could be;
4206 * 1. Rx of packet.
4207 * 2. Tx complete.
4208 * 3. Link down.
20346722 4209 * 4. Error in any functional blocks of the NIC.
1da177e4
LT
4210 */
4211 reason = readq(&bar0->general_int_status);
4212
4213 if (!reason) {
19a60522
SS
4214 /* The interrupt was not raised by us. */
4215 atomic_dec(&sp->isr_cnt);
4216 return IRQ_NONE;
4217 }
4218 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4219 /* Disable device and get out */
7ba013ac 4220 atomic_dec(&sp->isr_cnt);
1da177e4
LT
4221 return IRQ_NONE;
4222 }
5d3213cc 4223
db874e65
SS
4224 if (napi) {
4225 if (reason & GEN_INTR_RXTRAFFIC) {
19a60522 4226 if ( likely ( netif_rx_schedule_prep(dev)) ) {
db874e65 4227 __netif_rx_schedule(dev);
19a60522 4228 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
db874e65 4229 }
19a60522
SS
4230 else
4231 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
db874e65
SS
4232 }
4233 } else {
4234 /*
4235 * Rx handler is called by default, without checking for the
4236 * cause of interrupt.
4237 * rx_traffic_int reg is an R1 register, writing all 1's
4238 * will ensure that the actual interrupt causing bit get's
4239 * cleared and hence a read can be avoided.
4240 */
19a60522
SS
4241 if (reason & GEN_INTR_RXTRAFFIC)
4242 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4243
db874e65
SS
4244 for (i = 0; i < config->rx_ring_num; i++) {
4245 rx_intr_handler(&mac_control->rings[i]);
1da177e4
LT
4246 }
4247 }
1da177e4 4248
863c11a9
AR
4249 /*
4250 * tx_traffic_int reg is an R1 register, writing all 1's
4251 * will ensure that the actual interrupt causing bit get's
4252 * cleared and hence a read can be avoided.
4253 */
19a60522
SS
4254 if (reason & GEN_INTR_TXTRAFFIC)
4255 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
fe113638 4256
863c11a9
AR
4257 for (i = 0; i < config->tx_fifo_num; i++)
4258 tx_intr_handler(&mac_control->fifos[i]);
20346722 4259
a371a07d
K
4260 if (reason & GEN_INTR_TXPIC)
4261 s2io_txpic_intr_handle(sp);
20346722
K
4262 /*
4263 * If the Rx buffer count is below the panic threshold then
4264 * reallocate the buffers from the interrupt handler itself,
1da177e4
LT
4265 * else schedule a tasklet to reallocate the buffers.
4266 */
db874e65
SS
4267 if (!napi) {
4268 for (i = 0; i < config->rx_ring_num; i++)
4269 s2io_chk_rx_buffers(sp, i);
4270 }
4271
4272 writeq(0, &bar0->general_int_mask);
4273 readl(&bar0->general_int_status);
4274
7ba013ac 4275 atomic_dec(&sp->isr_cnt);
1da177e4
LT
4276 return IRQ_HANDLED;
4277}
4278
7ba013ac
K
4279/**
4280 * s2io_updt_stats -
4281 */
1ee6dd77 4282static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4283{
1ee6dd77 4284 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4285 u64 val64;
4286 int cnt = 0;
4287
4288 if (atomic_read(&sp->card_state) == CARD_UP) {
4289 /* Apprx 30us on a 133 MHz bus */
4290 val64 = SET_UPDT_CLICKS(10) |
4291 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4292 writeq(val64, &bar0->stat_cfg);
4293 do {
4294 udelay(100);
4295 val64 = readq(&bar0->stat_cfg);
4296 if (!(val64 & BIT(0)))
4297 break;
4298 cnt++;
4299 if (cnt == 5)
4300 break; /* Updt failed */
4301 } while(1);
363dc367 4302 }
7ba013ac
K
4303}
4304
1da177e4 4305/**
20346722 4306 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4307 * @dev : pointer to the device structure.
4308 * Description:
20346722 4309 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4310 * structure and returns a pointer to the same.
4311 * Return value:
4312 * pointer to the updated net_device_stats structure.
4313 */
4314
ac1f60db 4315static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4316{
1ee6dd77
RB
4317 struct s2io_nic *sp = dev->priv;
4318 struct mac_info *mac_control;
1da177e4
LT
4319 struct config_param *config;
4320
20346722 4321
1da177e4
LT
4322 mac_control = &sp->mac_control;
4323 config = &sp->config;
4324
7ba013ac
K
4325 /* Configure Stats for immediate updt */
4326 s2io_updt_stats(sp);
4327
4328 sp->stats.tx_packets =
4329 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722
K
4330 sp->stats.tx_errors =
4331 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4332 sp->stats.rx_errors =
ee705dba 4333 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
20346722
K
4334 sp->stats.multicast =
4335 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 4336 sp->stats.rx_length_errors =
ee705dba 4337 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
4338
4339 return (&sp->stats);
4340}
4341
4342/**
4343 * s2io_set_multicast - entry point for multicast address enable/disable.
4344 * @dev : pointer to the device structure
4345 * Description:
20346722
K
4346 * This function is a driver entry point which gets called by the kernel
4347 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
4348 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4349 * determine, if multicast address must be enabled or if promiscuous mode
4350 * is to be disabled etc.
4351 * Return value:
4352 * void.
4353 */
4354
4355static void s2io_set_multicast(struct net_device *dev)
4356{
4357 int i, j, prev_cnt;
4358 struct dev_mc_list *mclist;
1ee6dd77
RB
4359 struct s2io_nic *sp = dev->priv;
4360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4361 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4362 0xfeffffffffffULL;
4363 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4364 void __iomem *add;
4365
4366 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4367 /* Enable all Multicast addresses */
4368 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4369 &bar0->rmac_addr_data0_mem);
4370 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4371 &bar0->rmac_addr_data1_mem);
4372 val64 = RMAC_ADDR_CMD_MEM_WE |
4373 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4374 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4375 writeq(val64, &bar0->rmac_addr_cmd_mem);
4376 /* Wait till command completes */
c92ca04b 4377 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4378 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4379 S2IO_BIT_RESET);
1da177e4
LT
4380
4381 sp->m_cast_flg = 1;
4382 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4383 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4384 /* Disable all Multicast addresses */
4385 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4386 &bar0->rmac_addr_data0_mem);
5e25b9dd
K
4387 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4388 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4389 val64 = RMAC_ADDR_CMD_MEM_WE |
4390 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4391 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4392 writeq(val64, &bar0->rmac_addr_cmd_mem);
4393 /* Wait till command completes */
c92ca04b 4394 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4395 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4396 S2IO_BIT_RESET);
1da177e4
LT
4397
4398 sp->m_cast_flg = 0;
4399 sp->all_multi_pos = 0;
4400 }
4401
4402 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4403 /* Put the NIC into promiscuous mode */
4404 add = &bar0->mac_cfg;
4405 val64 = readq(&bar0->mac_cfg);
4406 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4407
4408 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4409 writel((u32) val64, add);
4410 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4411 writel((u32) (val64 >> 32), (add + 4));
4412
926930b2
SS
4413 if (vlan_tag_strip != 1) {
4414 val64 = readq(&bar0->rx_pa_cfg);
4415 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4416 writeq(val64, &bar0->rx_pa_cfg);
4417 vlan_strip_flag = 0;
4418 }
4419
1da177e4
LT
4420 val64 = readq(&bar0->mac_cfg);
4421 sp->promisc_flg = 1;
776bd20f 4422 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
1da177e4
LT
4423 dev->name);
4424 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4425 /* Remove the NIC from promiscuous mode */
4426 add = &bar0->mac_cfg;
4427 val64 = readq(&bar0->mac_cfg);
4428 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4429
4430 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4431 writel((u32) val64, add);
4432 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4433 writel((u32) (val64 >> 32), (add + 4));
4434
926930b2
SS
4435 if (vlan_tag_strip != 0) {
4436 val64 = readq(&bar0->rx_pa_cfg);
4437 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4438 writeq(val64, &bar0->rx_pa_cfg);
4439 vlan_strip_flag = 1;
4440 }
4441
1da177e4
LT
4442 val64 = readq(&bar0->mac_cfg);
4443 sp->promisc_flg = 0;
776bd20f 4444 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
1da177e4
LT
4445 dev->name);
4446 }
4447
4448 /* Update individual M_CAST address list */
4449 if ((!sp->m_cast_flg) && dev->mc_count) {
4450 if (dev->mc_count >
4451 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4452 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4453 dev->name);
4454 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4455 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4456 return;
4457 }
4458
4459 prev_cnt = sp->mc_addr_count;
4460 sp->mc_addr_count = dev->mc_count;
4461
4462 /* Clear out the previous list of Mc in the H/W. */
4463 for (i = 0; i < prev_cnt; i++) {
4464 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4465 &bar0->rmac_addr_data0_mem);
4466 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 4467 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4468 val64 = RMAC_ADDR_CMD_MEM_WE |
4469 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4470 RMAC_ADDR_CMD_MEM_OFFSET
4471 (MAC_MC_ADDR_START_OFFSET + i);
4472 writeq(val64, &bar0->rmac_addr_cmd_mem);
4473
4474 /* Wait for command completes */
c92ca04b 4475 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4476 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4477 S2IO_BIT_RESET)) {
1da177e4
LT
4478 DBG_PRINT(ERR_DBG, "%s: Adding ",
4479 dev->name);
4480 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4481 return;
4482 }
4483 }
4484
4485 /* Create the new Rx filter list and update the same in H/W. */
4486 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4487 i++, mclist = mclist->next) {
4488 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4489 ETH_ALEN);
a7a80d5a 4490 mac_addr = 0;
1da177e4
LT
4491 for (j = 0; j < ETH_ALEN; j++) {
4492 mac_addr |= mclist->dmi_addr[j];
4493 mac_addr <<= 8;
4494 }
4495 mac_addr >>= 8;
4496 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4497 &bar0->rmac_addr_data0_mem);
4498 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 4499 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4500 val64 = RMAC_ADDR_CMD_MEM_WE |
4501 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4502 RMAC_ADDR_CMD_MEM_OFFSET
4503 (i + MAC_MC_ADDR_START_OFFSET);
4504 writeq(val64, &bar0->rmac_addr_cmd_mem);
4505
4506 /* Wait for command completes */
c92ca04b 4507 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4508 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4509 S2IO_BIT_RESET)) {
1da177e4
LT
4510 DBG_PRINT(ERR_DBG, "%s: Adding ",
4511 dev->name);
4512 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4513 return;
4514 }
4515 }
4516 }
4517}
4518
4519/**
20346722 4520 * s2io_set_mac_addr - Programs the Xframe mac address
1da177e4
LT
4521 * @dev : pointer to the device structure.
4522 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 4523 * Description : This procedure will program the Xframe to receive
1da177e4 4524 * frames with new Mac Address
20346722 4525 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
4526 * as defined in errno.h file on failure.
4527 */
4528
26df54bf 4529static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
1da177e4 4530{
1ee6dd77
RB
4531 struct s2io_nic *sp = dev->priv;
4532 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4533 register u64 val64, mac_addr = 0;
4534 int i;
d8d70caf 4535 u64 old_mac_addr = 0;
1da177e4 4536
20346722 4537 /*
1da177e4
LT
4538 * Set the new MAC address as the new unicast filter and reflect this
4539 * change on the device address registered with the OS. It will be
20346722 4540 * at offset 0.
1da177e4
LT
4541 */
4542 for (i = 0; i < ETH_ALEN; i++) {
4543 mac_addr <<= 8;
4544 mac_addr |= addr[i];
d8d70caf
SS
4545 old_mac_addr <<= 8;
4546 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4547 }
4548
4549 if(0 == mac_addr)
4550 return SUCCESS;
4551
4552 /* Update the internal structure with this new mac address */
4553 if(mac_addr != old_mac_addr) {
4554 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4555 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4556 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4557 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4558 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4559 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4560 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
1da177e4
LT
4561 }
4562
4563 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4564 &bar0->rmac_addr_data0_mem);
4565
4566 val64 =
4567 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4568 RMAC_ADDR_CMD_MEM_OFFSET(0);
4569 writeq(val64, &bar0->rmac_addr_cmd_mem);
4570 /* Wait till command completes */
c92ca04b 4571 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41 4572 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
1da177e4
LT
4573 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4574 return FAILURE;
4575 }
4576
4577 return SUCCESS;
4578}
4579
4580/**
20346722 4581 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
4582 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4583 * @info: pointer to the structure with parameters given by ethtool to set
4584 * link information.
4585 * Description:
20346722 4586 * The function sets different link parameters provided by the user onto
1da177e4
LT
4587 * the NIC.
4588 * Return value:
4589 * 0 on success.
4590*/
4591
4592static int s2io_ethtool_sset(struct net_device *dev,
4593 struct ethtool_cmd *info)
4594{
1ee6dd77 4595 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4596 if ((info->autoneg == AUTONEG_ENABLE) ||
4597 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4598 return -EINVAL;
4599 else {
4600 s2io_close(sp->dev);
4601 s2io_open(sp->dev);
4602 }
4603
4604 return 0;
4605}
4606
4607/**
20346722 4608 * s2io_ethtol_gset - Return link specific information.
1da177e4
LT
4609 * @sp : private member of the device structure, pointer to the
4610 * s2io_nic structure.
4611 * @info : pointer to the structure with parameters given by ethtool
4612 * to return link information.
4613 * Description:
4614 * Returns link specific information like speed, duplex etc.. to ethtool.
4615 * Return value :
4616 * return 0 on success.
4617 */
4618
4619static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4620{
1ee6dd77 4621 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4622 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4623 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4624 info->port = PORT_FIBRE;
4625 /* info->transceiver?? TODO */
4626
4627 if (netif_carrier_ok(sp->dev)) {
4628 info->speed = 10000;
4629 info->duplex = DUPLEX_FULL;
4630 } else {
4631 info->speed = -1;
4632 info->duplex = -1;
4633 }
4634
4635 info->autoneg = AUTONEG_DISABLE;
4636 return 0;
4637}
4638
4639/**
20346722
K
4640 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4641 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4642 * s2io_nic structure.
4643 * @info : pointer to the structure with parameters given by ethtool to
4644 * return driver information.
4645 * Description:
4646 * Returns driver specefic information like name, version etc.. to ethtool.
4647 * Return value:
4648 * void
4649 */
4650
4651static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4652 struct ethtool_drvinfo *info)
4653{
1ee6dd77 4654 struct s2io_nic *sp = dev->priv;
1da177e4 4655
dbc2309d
JL
4656 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4657 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4658 strncpy(info->fw_version, "", sizeof(info->fw_version));
4659 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
4660 info->regdump_len = XENA_REG_SPACE;
4661 info->eedump_len = XENA_EEPROM_SPACE;
4662 info->testinfo_len = S2IO_TEST_LEN;
fa1f0cb3
SS
4663
4664 if (sp->device_type == XFRAME_I_DEVICE)
4665 info->n_stats = XFRAME_I_STAT_LEN;
4666 else
4667 info->n_stats = XFRAME_II_STAT_LEN;
1da177e4
LT
4668}
4669
4670/**
4671 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 4672 * @sp: private member of the device structure, which is a pointer to the
1da177e4 4673 * s2io_nic structure.
20346722 4674 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
4675 * dumping the registers.
4676 * @reg_space: The input argumnet into which all the registers are dumped.
4677 * Description:
4678 * Dumps the entire register space of xFrame NIC into the user given
4679 * buffer area.
4680 * Return value :
4681 * void .
4682*/
4683
4684static void s2io_ethtool_gregs(struct net_device *dev,
4685 struct ethtool_regs *regs, void *space)
4686{
4687 int i;
4688 u64 reg;
4689 u8 *reg_space = (u8 *) space;
1ee6dd77 4690 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4691
4692 regs->len = XENA_REG_SPACE;
4693 regs->version = sp->pdev->subsystem_device;
4694
4695 for (i = 0; i < regs->len; i += 8) {
4696 reg = readq(sp->bar0 + i);
4697 memcpy((reg_space + i), &reg, 8);
4698 }
4699}
4700
4701/**
4702 * s2io_phy_id - timer function that alternates adapter LED.
20346722 4703 * @data : address of the private member of the device structure, which
1da177e4 4704 * is a pointer to the s2io_nic structure, provided as an u32.
20346722
K
4705 * Description: This is actually the timer function that alternates the
4706 * adapter LED bit of the adapter control bit to set/reset every time on
4707 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
1da177e4
LT
4708 * once every second.
4709*/
4710static void s2io_phy_id(unsigned long data)
4711{
1ee6dd77
RB
4712 struct s2io_nic *sp = (struct s2io_nic *) data;
4713 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4714 u64 val64 = 0;
4715 u16 subid;
4716
4717 subid = sp->pdev->subsystem_device;
541ae68f
K
4718 if ((sp->device_type == XFRAME_II_DEVICE) ||
4719 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
4720 val64 = readq(&bar0->gpio_control);
4721 val64 ^= GPIO_CTRL_GPIO_0;
4722 writeq(val64, &bar0->gpio_control);
4723 } else {
4724 val64 = readq(&bar0->adapter_control);
4725 val64 ^= ADAPTER_LED_ON;
4726 writeq(val64, &bar0->adapter_control);
4727 }
4728
4729 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4730}
4731
4732/**
4733 * s2io_ethtool_idnic - To physically identify the nic on the system.
4734 * @sp : private member of the device structure, which is a pointer to the
4735 * s2io_nic structure.
20346722 4736 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
4737 * ethtool.
4738 * Description: Used to physically identify the NIC on the system.
20346722 4739 * The Link LED will blink for a time specified by the user for
1da177e4 4740 * identification.
20346722 4741 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
4742 * identification is possible only if it's link is up.
4743 * Return value:
4744 * int , returns 0 on success
4745 */
4746
4747static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4748{
4749 u64 val64 = 0, last_gpio_ctrl_val;
1ee6dd77
RB
4750 struct s2io_nic *sp = dev->priv;
4751 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4752 u16 subid;
4753
4754 subid = sp->pdev->subsystem_device;
4755 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f
K
4756 if ((sp->device_type == XFRAME_I_DEVICE) &&
4757 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
4758 val64 = readq(&bar0->adapter_control);
4759 if (!(val64 & ADAPTER_CNTL_EN)) {
4760 printk(KERN_ERR
4761 "Adapter Link down, cannot blink LED\n");
4762 return -EFAULT;
4763 }
4764 }
4765 if (sp->id_timer.function == NULL) {
4766 init_timer(&sp->id_timer);
4767 sp->id_timer.function = s2io_phy_id;
4768 sp->id_timer.data = (unsigned long) sp;
4769 }
4770 mod_timer(&sp->id_timer, jiffies);
4771 if (data)
20346722 4772 msleep_interruptible(data * HZ);
1da177e4 4773 else
20346722 4774 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
4775 del_timer_sync(&sp->id_timer);
4776
541ae68f 4777 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
4778 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4779 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4780 }
4781
4782 return 0;
4783}
4784
0cec35eb
SH
4785static void s2io_ethtool_gringparam(struct net_device *dev,
4786 struct ethtool_ringparam *ering)
4787{
4788 struct s2io_nic *sp = dev->priv;
4789 int i,tx_desc_count=0,rx_desc_count=0;
4790
4791 if (sp->rxd_mode == RXD_MODE_1)
4792 ering->rx_max_pending = MAX_RX_DESC_1;
4793 else if (sp->rxd_mode == RXD_MODE_3B)
4794 ering->rx_max_pending = MAX_RX_DESC_2;
4795 else if (sp->rxd_mode == RXD_MODE_3A)
4796 ering->rx_max_pending = MAX_RX_DESC_3;
4797
4798 ering->tx_max_pending = MAX_TX_DESC;
4799 for (i = 0 ; i < sp->config.tx_fifo_num ; i++) {
4800 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4801 }
4802 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4803 ering->tx_pending = tx_desc_count;
4804 rx_desc_count = 0;
4805 for (i = 0 ; i < sp->config.rx_ring_num ; i++) {
4806 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4807 }
4808 ering->rx_pending = rx_desc_count;
4809
4810 ering->rx_mini_max_pending = 0;
4811 ering->rx_mini_pending = 0;
4812 if(sp->rxd_mode == RXD_MODE_1)
4813 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
4814 else if (sp->rxd_mode == RXD_MODE_3B)
4815 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
4816 ering->rx_jumbo_pending = rx_desc_count;
4817}
4818
1da177e4
LT
4819/**
4820 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
20346722
K
4821 * @sp : private member of the device structure, which is a pointer to the
4822 * s2io_nic structure.
1da177e4
LT
4823 * @ep : pointer to the structure with pause parameters given by ethtool.
4824 * Description:
4825 * Returns the Pause frame generation and reception capability of the NIC.
4826 * Return value:
4827 * void
4828 */
4829static void s2io_ethtool_getpause_data(struct net_device *dev,
4830 struct ethtool_pauseparam *ep)
4831{
4832 u64 val64;
1ee6dd77
RB
4833 struct s2io_nic *sp = dev->priv;
4834 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4835
4836 val64 = readq(&bar0->rmac_pause_cfg);
4837 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4838 ep->tx_pause = TRUE;
4839 if (val64 & RMAC_PAUSE_RX_ENABLE)
4840 ep->rx_pause = TRUE;
4841 ep->autoneg = FALSE;
4842}
4843
4844/**
4845 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 4846 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4847 * s2io_nic structure.
4848 * @ep : pointer to the structure with pause parameters given by ethtool.
4849 * Description:
4850 * It can be used to set or reset Pause frame generation or reception
4851 * support of the NIC.
4852 * Return value:
4853 * int, returns 0 on Success
4854 */
4855
4856static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 4857 struct ethtool_pauseparam *ep)
1da177e4
LT
4858{
4859 u64 val64;
1ee6dd77
RB
4860 struct s2io_nic *sp = dev->priv;
4861 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4862
4863 val64 = readq(&bar0->rmac_pause_cfg);
4864 if (ep->tx_pause)
4865 val64 |= RMAC_PAUSE_GEN_ENABLE;
4866 else
4867 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4868 if (ep->rx_pause)
4869 val64 |= RMAC_PAUSE_RX_ENABLE;
4870 else
4871 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4872 writeq(val64, &bar0->rmac_pause_cfg);
4873 return 0;
4874}
4875
4876/**
4877 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 4878 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4879 * s2io_nic structure.
4880 * @off : offset at which the data must be written
4881 * @data : Its an output parameter where the data read at the given
20346722 4882 * offset is stored.
1da177e4 4883 * Description:
20346722 4884 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
4885 * read data.
4886 * NOTE: Will allow to read only part of the EEPROM visible through the
4887 * I2C bus.
4888 * Return value:
4889 * -1 on failure and 0 on success.
4890 */
4891
4892#define S2IO_DEV_ID 5
1ee6dd77 4893static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
1da177e4
LT
4894{
4895 int ret = -1;
4896 u32 exit_cnt = 0;
4897 u64 val64;
1ee6dd77 4898 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 4899
ad4ebed0 4900 if (sp->device_type == XFRAME_I_DEVICE) {
4901 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4902 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4903 I2C_CONTROL_CNTL_START;
4904 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
1da177e4 4905
ad4ebed0 4906 while (exit_cnt < 5) {
4907 val64 = readq(&bar0->i2c_control);
4908 if (I2C_CONTROL_CNTL_END(val64)) {
4909 *data = I2C_CONTROL_GET_DATA(val64);
4910 ret = 0;
4911 break;
4912 }
4913 msleep(50);
4914 exit_cnt++;
1da177e4 4915 }
1da177e4
LT
4916 }
4917
ad4ebed0 4918 if (sp->device_type == XFRAME_II_DEVICE) {
4919 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 4920 SPI_CONTROL_BYTECNT(0x3) |
ad4ebed0 4921 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4922 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4923 val64 |= SPI_CONTROL_REQ;
4924 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4925 while (exit_cnt < 5) {
4926 val64 = readq(&bar0->spi_control);
4927 if (val64 & SPI_CONTROL_NACK) {
4928 ret = 1;
4929 break;
4930 } else if (val64 & SPI_CONTROL_DONE) {
4931 *data = readq(&bar0->spi_data);
4932 *data &= 0xffffff;
4933 ret = 0;
4934 break;
4935 }
4936 msleep(50);
4937 exit_cnt++;
4938 }
4939 }
1da177e4
LT
4940 return ret;
4941}
4942
4943/**
4944 * write_eeprom - actually writes the relevant part of the data value.
4945 * @sp : private member of the device structure, which is a pointer to the
4946 * s2io_nic structure.
4947 * @off : offset at which the data must be written
4948 * @data : The data that is to be written
20346722 4949 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
4950 * the Eeprom. (max of 3)
4951 * Description:
4952 * Actually writes the relevant part of the data value into the Eeprom
4953 * through the I2C bus.
4954 * Return value:
4955 * 0 on success, -1 on failure.
4956 */
4957
1ee6dd77 4958static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
1da177e4
LT
4959{
4960 int exit_cnt = 0, ret = -1;
4961 u64 val64;
1ee6dd77 4962 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 4963
ad4ebed0 4964 if (sp->device_type == XFRAME_I_DEVICE) {
4965 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4966 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4967 I2C_CONTROL_CNTL_START;
4968 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4969
4970 while (exit_cnt < 5) {
4971 val64 = readq(&bar0->i2c_control);
4972 if (I2C_CONTROL_CNTL_END(val64)) {
4973 if (!(val64 & I2C_CONTROL_NACK))
4974 ret = 0;
4975 break;
4976 }
4977 msleep(50);
4978 exit_cnt++;
4979 }
4980 }
1da177e4 4981
ad4ebed0 4982 if (sp->device_type == XFRAME_II_DEVICE) {
4983 int write_cnt = (cnt == 8) ? 0 : cnt;
4984 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4985
4986 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 4987 SPI_CONTROL_BYTECNT(write_cnt) |
ad4ebed0 4988 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4989 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4990 val64 |= SPI_CONTROL_REQ;
4991 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4992 while (exit_cnt < 5) {
4993 val64 = readq(&bar0->spi_control);
4994 if (val64 & SPI_CONTROL_NACK) {
4995 ret = 1;
4996 break;
4997 } else if (val64 & SPI_CONTROL_DONE) {
1da177e4 4998 ret = 0;
ad4ebed0 4999 break;
5000 }
5001 msleep(50);
5002 exit_cnt++;
1da177e4 5003 }
1da177e4 5004 }
1da177e4
LT
5005 return ret;
5006}
1ee6dd77 5007static void s2io_vpd_read(struct s2io_nic *nic)
9dc737a7 5008{
b41477f3
AR
5009 u8 *vpd_data;
5010 u8 data;
9dc737a7
AR
5011 int i=0, cnt, fail = 0;
5012 int vpd_addr = 0x80;
5013
5014 if (nic->device_type == XFRAME_II_DEVICE) {
5015 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5016 vpd_addr = 0x80;
5017 }
5018 else {
5019 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5020 vpd_addr = 0x50;
5021 }
19a60522 5022 strcpy(nic->serial_num, "NOT AVAILABLE");
9dc737a7 5023
b41477f3 5024 vpd_data = kmalloc(256, GFP_KERNEL);
c53d4945
SH
5025 if (!vpd_data) {
5026 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
b41477f3 5027 return;
c53d4945 5028 }
b41477f3 5029
9dc737a7
AR
5030 for (i = 0; i < 256; i +=4 ) {
5031 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5032 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5033 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5034 for (cnt = 0; cnt <5; cnt++) {
5035 msleep(2);
5036 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5037 if (data == 0x80)
5038 break;
5039 }
5040 if (cnt >= 5) {
5041 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5042 fail = 1;
5043 break;
5044 }
5045 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5046 (u32 *)&vpd_data[i]);
5047 }
19a60522
SS
5048
5049 if(!fail) {
5050 /* read serial number of adapter */
5051 for (cnt = 0; cnt < 256; cnt++) {
5052 if ((vpd_data[cnt] == 'S') &&
5053 (vpd_data[cnt+1] == 'N') &&
5054 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5055 memset(nic->serial_num, 0, VPD_STRING_LEN);
5056 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5057 vpd_data[cnt+2]);
5058 break;
5059 }
5060 }
5061 }
5062
5063 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
9dc737a7
AR
5064 memset(nic->product_name, 0, vpd_data[1]);
5065 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5066 }
b41477f3 5067 kfree(vpd_data);
9dc737a7
AR
5068}
5069
1da177e4
LT
5070/**
5071 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5072 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 5073 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5074 * containing all relevant information.
5075 * @data_buf : user defined value to be written into Eeprom.
5076 * Description: Reads the values stored in the Eeprom at given offset
5077 * for a given length. Stores these values int the input argument data
5078 * buffer 'data_buf' and returns these to the caller (ethtool.)
5079 * Return value:
5080 * int 0 on success
5081 */
5082
5083static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 5084 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5085{
ad4ebed0 5086 u32 i, valid;
5087 u64 data;
1ee6dd77 5088 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5089
5090 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5091
5092 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5093 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5094
5095 for (i = 0; i < eeprom->len; i += 4) {
5096 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5097 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5098 return -EFAULT;
5099 }
5100 valid = INV(data);
5101 memcpy((data_buf + i), &valid, 4);
5102 }
5103 return 0;
5104}
5105
5106/**
5107 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5108 * @sp : private member of the device structure, which is a pointer to the
5109 * s2io_nic structure.
20346722 5110 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5111 * containing all relevant information.
5112 * @data_buf ; user defined value to be written into Eeprom.
5113 * Description:
5114 * Tries to write the user provided value in the Eeprom, at the offset
5115 * given by the user.
5116 * Return value:
5117 * 0 on success, -EFAULT on failure.
5118 */
5119
5120static int s2io_ethtool_seeprom(struct net_device *dev,
5121 struct ethtool_eeprom *eeprom,
5122 u8 * data_buf)
5123{
5124 int len = eeprom->len, cnt = 0;
ad4ebed0 5125 u64 valid = 0, data;
1ee6dd77 5126 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5127
5128 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5129 DBG_PRINT(ERR_DBG,
5130 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5131 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5132 eeprom->magic);
5133 return -EFAULT;
5134 }
5135
5136 while (len) {
5137 data = (u32) data_buf[cnt] & 0x000000FF;
5138 if (data) {
5139 valid = (u32) (data << 24);
5140 } else
5141 valid = data;
5142
5143 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5144 DBG_PRINT(ERR_DBG,
5145 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5146 DBG_PRINT(ERR_DBG,
5147 "write into the specified offset\n");
5148 return -EFAULT;
5149 }
5150 cnt++;
5151 len--;
5152 }
5153
5154 return 0;
5155}
5156
5157/**
20346722
K
5158 * s2io_register_test - reads and writes into all clock domains.
5159 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5160 * s2io_nic structure.
5161 * @data : variable that returns the result of each of the test conducted b
5162 * by the driver.
5163 * Description:
5164 * Read and write into all clock domains. The NIC has 3 clock domains,
5165 * see that registers in all the three regions are accessible.
5166 * Return value:
5167 * 0 on success.
5168 */
5169
1ee6dd77 5170static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5171{
1ee6dd77 5172 struct XENA_dev_config __iomem *bar0 = sp->bar0;
ad4ebed0 5173 u64 val64 = 0, exp_val;
1da177e4
LT
5174 int fail = 0;
5175
20346722
K
5176 val64 = readq(&bar0->pif_rd_swapper_fb);
5177 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
5178 fail = 1;
5179 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5180 }
5181
5182 val64 = readq(&bar0->rmac_pause_cfg);
5183 if (val64 != 0xc000ffff00000000ULL) {
5184 fail = 1;
5185 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5186 }
5187
5188 val64 = readq(&bar0->rx_queue_cfg);
ad4ebed0 5189 if (sp->device_type == XFRAME_II_DEVICE)
5190 exp_val = 0x0404040404040404ULL;
5191 else
5192 exp_val = 0x0808080808080808ULL;
5193 if (val64 != exp_val) {
1da177e4
LT
5194 fail = 1;
5195 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5196 }
5197
5198 val64 = readq(&bar0->xgxs_efifo_cfg);
5199 if (val64 != 0x000000001923141EULL) {
5200 fail = 1;
5201 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5202 }
5203
5204 val64 = 0x5A5A5A5A5A5A5A5AULL;
5205 writeq(val64, &bar0->xmsi_data);
5206 val64 = readq(&bar0->xmsi_data);
5207 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5208 fail = 1;
5209 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5210 }
5211
5212 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5213 writeq(val64, &bar0->xmsi_data);
5214 val64 = readq(&bar0->xmsi_data);
5215 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5216 fail = 1;
5217 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5218 }
5219
5220 *data = fail;
ad4ebed0 5221 return fail;
1da177e4
LT
5222}
5223
5224/**
20346722 5225 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
5226 * @sp : private member of the device structure, which is a pointer to the
5227 * s2io_nic structure.
5228 * @data:variable that returns the result of each of the test conducted by
5229 * the driver.
5230 * Description:
20346722 5231 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
5232 * register.
5233 * Return value:
5234 * 0 on success.
5235 */
5236
1ee6dd77 5237static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5238{
5239 int fail = 0;
ad4ebed0 5240 u64 ret_data, org_4F0, org_7F0;
5241 u8 saved_4F0 = 0, saved_7F0 = 0;
5242 struct net_device *dev = sp->dev;
1da177e4
LT
5243
5244 /* Test Write Error at offset 0 */
ad4ebed0 5245 /* Note that SPI interface allows write access to all areas
5246 * of EEPROM. Hence doing all negative testing only for Xframe I.
5247 */
5248 if (sp->device_type == XFRAME_I_DEVICE)
5249 if (!write_eeprom(sp, 0, 0, 3))
5250 fail = 1;
5251
5252 /* Save current values at offsets 0x4F0 and 0x7F0 */
5253 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5254 saved_4F0 = 1;
5255 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5256 saved_7F0 = 1;
1da177e4
LT
5257
5258 /* Test Write at offset 4f0 */
ad4ebed0 5259 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
1da177e4
LT
5260 fail = 1;
5261 if (read_eeprom(sp, 0x4F0, &ret_data))
5262 fail = 1;
5263
ad4ebed0 5264 if (ret_data != 0x012345) {
26b7625c
AM
5265 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5266 "Data written %llx Data read %llx\n",
5267 dev->name, (unsigned long long)0x12345,
5268 (unsigned long long)ret_data);
1da177e4 5269 fail = 1;
ad4ebed0 5270 }
1da177e4
LT
5271
5272 /* Reset the EEPROM data go FFFF */
ad4ebed0 5273 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
1da177e4
LT
5274
5275 /* Test Write Request Error at offset 0x7c */
ad4ebed0 5276 if (sp->device_type == XFRAME_I_DEVICE)
5277 if (!write_eeprom(sp, 0x07C, 0, 3))
5278 fail = 1;
1da177e4 5279
ad4ebed0 5280 /* Test Write Request at offset 0x7f0 */
5281 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
1da177e4 5282 fail = 1;
ad4ebed0 5283 if (read_eeprom(sp, 0x7F0, &ret_data))
1da177e4
LT
5284 fail = 1;
5285
ad4ebed0 5286 if (ret_data != 0x012345) {
26b7625c
AM
5287 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5288 "Data written %llx Data read %llx\n",
5289 dev->name, (unsigned long long)0x12345,
5290 (unsigned long long)ret_data);
1da177e4 5291 fail = 1;
ad4ebed0 5292 }
1da177e4
LT
5293
5294 /* Reset the EEPROM data go FFFF */
ad4ebed0 5295 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
1da177e4 5296
ad4ebed0 5297 if (sp->device_type == XFRAME_I_DEVICE) {
5298 /* Test Write Error at offset 0x80 */
5299 if (!write_eeprom(sp, 0x080, 0, 3))
5300 fail = 1;
1da177e4 5301
ad4ebed0 5302 /* Test Write Error at offset 0xfc */
5303 if (!write_eeprom(sp, 0x0FC, 0, 3))
5304 fail = 1;
1da177e4 5305
ad4ebed0 5306 /* Test Write Error at offset 0x100 */
5307 if (!write_eeprom(sp, 0x100, 0, 3))
5308 fail = 1;
1da177e4 5309
ad4ebed0 5310 /* Test Write Error at offset 4ec */
5311 if (!write_eeprom(sp, 0x4EC, 0, 3))
5312 fail = 1;
5313 }
5314
5315 /* Restore values at offsets 0x4F0 and 0x7F0 */
5316 if (saved_4F0)
5317 write_eeprom(sp, 0x4F0, org_4F0, 3);
5318 if (saved_7F0)
5319 write_eeprom(sp, 0x7F0, org_7F0, 3);
1da177e4
LT
5320
5321 *data = fail;
ad4ebed0 5322 return fail;
1da177e4
LT
5323}
5324
5325/**
5326 * s2io_bist_test - invokes the MemBist test of the card .
20346722 5327 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5328 * s2io_nic structure.
20346722 5329 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
5330 * the driver.
5331 * Description:
5332 * This invokes the MemBist test of the card. We give around
5333 * 2 secs time for the Test to complete. If it's still not complete
20346722 5334 * within this peiod, we consider that the test failed.
1da177e4
LT
5335 * Return value:
5336 * 0 on success and -1 on failure.
5337 */
5338
1ee6dd77 5339static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5340{
5341 u8 bist = 0;
5342 int cnt = 0, ret = -1;
5343
5344 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5345 bist |= PCI_BIST_START;
5346 pci_write_config_word(sp->pdev, PCI_BIST, bist);
5347
5348 while (cnt < 20) {
5349 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5350 if (!(bist & PCI_BIST_START)) {
5351 *data = (bist & PCI_BIST_CODE_MASK);
5352 ret = 0;
5353 break;
5354 }
5355 msleep(100);
5356 cnt++;
5357 }
5358
5359 return ret;
5360}
5361
5362/**
20346722
K
5363 * s2io-link_test - verifies the link state of the nic
5364 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
5365 * s2io_nic structure.
5366 * @data: variable that returns the result of each of the test conducted by
5367 * the driver.
5368 * Description:
20346722 5369 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
5370 * argument 'data' appropriately.
5371 * Return value:
5372 * 0 on success.
5373 */
5374
1ee6dd77 5375static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5376{
1ee6dd77 5377 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5378 u64 val64;
5379
5380 val64 = readq(&bar0->adapter_status);
c92ca04b 5381 if(!(LINK_IS_UP(val64)))
1da177e4 5382 *data = 1;
c92ca04b
AR
5383 else
5384 *data = 0;
1da177e4 5385
b41477f3 5386 return *data;
1da177e4
LT
5387}
5388
5389/**
20346722
K
5390 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5391 * @sp - private member of the device structure, which is a pointer to the
1da177e4 5392 * s2io_nic structure.
20346722 5393 * @data - variable that returns the result of each of the test
1da177e4
LT
5394 * conducted by the driver.
5395 * Description:
20346722 5396 * This is one of the offline test that tests the read and write
1da177e4
LT
5397 * access to the RldRam chip on the NIC.
5398 * Return value:
5399 * 0 on success.
5400 */
5401
1ee6dd77 5402static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5403{
1ee6dd77 5404 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 5405 u64 val64;
ad4ebed0 5406 int cnt, iteration = 0, test_fail = 0;
1da177e4
LT
5407
5408 val64 = readq(&bar0->adapter_control);
5409 val64 &= ~ADAPTER_ECC_EN;
5410 writeq(val64, &bar0->adapter_control);
5411
5412 val64 = readq(&bar0->mc_rldram_test_ctrl);
5413 val64 |= MC_RLDRAM_TEST_MODE;
ad4ebed0 5414 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5415
5416 val64 = readq(&bar0->mc_rldram_mrs);
5417 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5418 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5419
5420 val64 |= MC_RLDRAM_MRS_ENABLE;
5421 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5422
5423 while (iteration < 2) {
5424 val64 = 0x55555555aaaa0000ULL;
5425 if (iteration == 1) {
5426 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5427 }
5428 writeq(val64, &bar0->mc_rldram_test_d0);
5429
5430 val64 = 0xaaaa5a5555550000ULL;
5431 if (iteration == 1) {
5432 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5433 }
5434 writeq(val64, &bar0->mc_rldram_test_d1);
5435
5436 val64 = 0x55aaaaaaaa5a0000ULL;
5437 if (iteration == 1) {
5438 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5439 }
5440 writeq(val64, &bar0->mc_rldram_test_d2);
5441
ad4ebed0 5442 val64 = (u64) (0x0000003ffffe0100ULL);
1da177e4
LT
5443 writeq(val64, &bar0->mc_rldram_test_add);
5444
ad4ebed0 5445 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5446 MC_RLDRAM_TEST_GO;
5447 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5448
5449 for (cnt = 0; cnt < 5; cnt++) {
5450 val64 = readq(&bar0->mc_rldram_test_ctrl);
5451 if (val64 & MC_RLDRAM_TEST_DONE)
5452 break;
5453 msleep(200);
5454 }
5455
5456 if (cnt == 5)
5457 break;
5458
ad4ebed0 5459 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5460 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5461
5462 for (cnt = 0; cnt < 5; cnt++) {
5463 val64 = readq(&bar0->mc_rldram_test_ctrl);
5464 if (val64 & MC_RLDRAM_TEST_DONE)
5465 break;
5466 msleep(500);
5467 }
5468
5469 if (cnt == 5)
5470 break;
5471
5472 val64 = readq(&bar0->mc_rldram_test_ctrl);
ad4ebed0 5473 if (!(val64 & MC_RLDRAM_TEST_PASS))
5474 test_fail = 1;
1da177e4
LT
5475
5476 iteration++;
5477 }
5478
ad4ebed0 5479 *data = test_fail;
1da177e4 5480
ad4ebed0 5481 /* Bring the adapter out of test mode */
5482 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5483
5484 return test_fail;
1da177e4
LT
5485}
5486
5487/**
5488 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
5489 * @sp : private member of the device structure, which is a pointer to the
5490 * s2io_nic structure.
5491 * @ethtest : pointer to a ethtool command specific structure that will be
5492 * returned to the user.
20346722 5493 * @data : variable that returns the result of each of the test
1da177e4
LT
5494 * conducted by the driver.
5495 * Description:
5496 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5497 * the health of the card.
5498 * Return value:
5499 * void
5500 */
5501
5502static void s2io_ethtool_test(struct net_device *dev,
5503 struct ethtool_test *ethtest,
5504 uint64_t * data)
5505{
1ee6dd77 5506 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5507 int orig_state = netif_running(sp->dev);
5508
5509 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5510 /* Offline Tests. */
20346722 5511 if (orig_state)
1da177e4 5512 s2io_close(sp->dev);
1da177e4
LT
5513
5514 if (s2io_register_test(sp, &data[0]))
5515 ethtest->flags |= ETH_TEST_FL_FAILED;
5516
5517 s2io_reset(sp);
1da177e4
LT
5518
5519 if (s2io_rldram_test(sp, &data[3]))
5520 ethtest->flags |= ETH_TEST_FL_FAILED;
5521
5522 s2io_reset(sp);
1da177e4
LT
5523
5524 if (s2io_eeprom_test(sp, &data[1]))
5525 ethtest->flags |= ETH_TEST_FL_FAILED;
5526
5527 if (s2io_bist_test(sp, &data[4]))
5528 ethtest->flags |= ETH_TEST_FL_FAILED;
5529
5530 if (orig_state)
5531 s2io_open(sp->dev);
5532
5533 data[2] = 0;
5534 } else {
5535 /* Online Tests. */
5536 if (!orig_state) {
5537 DBG_PRINT(ERR_DBG,
5538 "%s: is not up, cannot run test\n",
5539 dev->name);
5540 data[0] = -1;
5541 data[1] = -1;
5542 data[2] = -1;
5543 data[3] = -1;
5544 data[4] = -1;
5545 }
5546
5547 if (s2io_link_test(sp, &data[2]))
5548 ethtest->flags |= ETH_TEST_FL_FAILED;
5549
5550 data[0] = 0;
5551 data[1] = 0;
5552 data[3] = 0;
5553 data[4] = 0;
5554 }
5555}
5556
/**
 * s2io_get_ethtool_stats - fills the ethtool statistics array.
 * @dev: device pointer.
 * @estats: ethtool stats request header (unused here).
 * @tmp_stats: output array; one u64 per statistic.
 *
 * NOTE: the order of the entries below MUST match
 * ethtool_xena_stats_keys, then (on Xframe II only)
 * ethtool_enhanced_stats_keys, then ethtool_driver_stats_keys,
 * as reported by s2io_ethtool_get_strings().
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Refresh the DMA'd hardware stat block before reading it. */
	s2io_updt_stats(sp);

	/*
	 * Tx MAC counters.  Hardware keeps some as a 32-bit counter plus a
	 * separate 32-bit overflow word; those pairs are stitched into one
	 * 64-bit value here.
	 */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);

	/* Rx MAC counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	/* Rx frame-size histogram. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	/* Per-queue Rx frame and queue-full counters. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);

	/* PCI/DMA transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/*
	 * Driver (software) statistics.
	 * The leading 0 fills the slot for the first driver-stat key;
	 * presumably a reserved/unimplemented counter — confirm against
	 * ethtool_driver_stats_keys.
	 */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
	/* XPAK transceiver alarm/warning counters. */
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	/* LRO (large receive offload) software counters. */
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
}
5791
ac1f60db 5792static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
5793{
5794 return (XENA_REG_SPACE);
5795}
5796
5797
ac1f60db 5798static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4 5799{
1ee6dd77 5800 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5801
5802 return (sp->rx_csum);
5803}
ac1f60db
AB
5804
5805static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 5806{
1ee6dd77 5807 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5808
5809 if (data)
5810 sp->rx_csum = 1;
5811 else
5812 sp->rx_csum = 0;
5813
5814 return 0;
5815}
ac1f60db
AB
5816
5817static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
5818{
5819 return (XENA_EEPROM_SPACE);
5820}
5821
ac1f60db 5822static int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
5823{
5824 return (S2IO_TEST_LEN);
5825}
ac1f60db
AB
5826
5827static void s2io_ethtool_get_strings(struct net_device *dev,
5828 u32 stringset, u8 * data)
1da177e4 5829{
fa1f0cb3
SS
5830 int stat_size = 0;
5831 struct s2io_nic *sp = dev->priv;
5832
1da177e4
LT
5833 switch (stringset) {
5834 case ETH_SS_TEST:
5835 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5836 break;
5837 case ETH_SS_STATS:
fa1f0cb3
SS
5838 stat_size = sizeof(ethtool_xena_stats_keys);
5839 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5840 if(sp->device_type == XFRAME_II_DEVICE) {
5841 memcpy(data + stat_size,
5842 &ethtool_enhanced_stats_keys,
5843 sizeof(ethtool_enhanced_stats_keys));
5844 stat_size += sizeof(ethtool_enhanced_stats_keys);
5845 }
5846
5847 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5848 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
5849 }
5850}
1da177e4
LT
5851static int s2io_ethtool_get_stats_count(struct net_device *dev)
5852{
fa1f0cb3
SS
5853 struct s2io_nic *sp = dev->priv;
5854 int stat_count = 0;
5855 switch(sp->device_type) {
5856 case XFRAME_I_DEVICE:
5857 stat_count = XFRAME_I_STAT_LEN;
5858 break;
5859
5860 case XFRAME_II_DEVICE:
5861 stat_count = XFRAME_II_STAT_LEN;
5862 break;
5863 }
5864
5865 return stat_count;
1da177e4
LT
5866}
5867
ac1f60db 5868static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
5869{
5870 if (data)
5871 dev->features |= NETIF_F_IP_CSUM;
5872 else
5873 dev->features &= ~NETIF_F_IP_CSUM;
5874
5875 return 0;
5876}
5877
75c30b13
AR
5878static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5879{
5880 return (dev->features & NETIF_F_TSO) != 0;
5881}
5882static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5883{
5884 if (data)
5885 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5886 else
5887 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5888
5889 return 0;
5890}
1da177e4 5891
/* ethtool entry points for this driver; installed on the netdev at probe. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	/* Tx csum read uses the generic helper; the setter is driver-local. */
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
};
5922
5923/**
20346722 5924 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
5925 * @dev : Device pointer.
 5926 * @ifr : An IOCTL specific structure, that can contain a pointer to
5927 * a proprietary structure used to pass information to the driver.
5928 * @cmd : This is used to distinguish between the different commands that
5929 * can be passed to the IOCTL functions.
5930 * Description:
20346722
K
 5931 * Currently there is no special functionality supported in IOCTL, hence
 5932 * the function always returns -EOPNOTSUPP.
1da177e4
LT
5933 */
5934
ac1f60db 5935static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
5936{
5937 return -EOPNOTSUPP;
5938}
5939
5940/**
5941 * s2io_change_mtu - entry point to change MTU size for the device.
5942 * @dev : device pointer.
5943 * @new_mtu : the new MTU size for the device.
5944 * Description: A driver entry point to change MTU size for the device.
5945 * Before changing the MTU the device must be stopped.
5946 * Return value:
5947 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5948 * file on failure.
5949 */
5950
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = dev->priv;

	/* Reject MTUs outside the range the hardware supports. */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
			  dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/*
		 * The adapter must be fully reinitialized for the new MTU
		 * to take effect; bounce the card down and back up.
		 */
		s2io_card_down(sp);
		netif_stop_queue(dev);
		if (s2io_card_up(sp)) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __FUNCTION__);
		}
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program the max payload length register directly. */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return 0;
}
5980
5981/**
5982 * s2io_tasklet - Bottom half of the ISR.
 5983 * @dev_addr : address of the device structure in dma_addr_t format.
5984 * Description:
5985 * This is the tasklet or the bottom half of the ISR. This is
20346722 5986 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 5987 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 5988 * be pushed into the tasklet. For now the tasklet is used only to
1da177e4
LT
5989 * replenish the Rx buffers in the Rx buffer descriptors.
5990 * Return value:
5991 * void.
5992 */
5993
5994static void s2io_tasklet(unsigned long dev_addr)
5995{
5996 struct net_device *dev = (struct net_device *) dev_addr;
1ee6dd77 5997 struct s2io_nic *sp = dev->priv;
1da177e4 5998 int i, ret;
1ee6dd77 5999 struct mac_info *mac_control;
1da177e4
LT
6000 struct config_param *config;
6001
6002 mac_control = &sp->mac_control;
6003 config = &sp->config;
6004
6005 if (!TASKLET_IN_USE) {
6006 for (i = 0; i < config->rx_ring_num; i++) {
6007 ret = fill_rx_buffers(sp, i);
6008 if (ret == -ENOMEM) {
0c61ed5f 6009 DBG_PRINT(INFO_DBG, "%s: Out of ",
1da177e4
LT
6010 dev->name);
6011 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
6012 break;
6013 } else if (ret == -EFILL) {
0c61ed5f 6014 DBG_PRINT(INFO_DBG,
1da177e4
LT
6015 "%s: Rx Ring %d is full\n",
6016 dev->name, i);
6017 break;
6018 }
6019 }
6020 clear_bit(0, (&sp->tasklet_status));
6021 }
6022}
6023
6024/**
 6025 * s2io_set_link - Set the Link status
 6026 * @data: long pointer to device private structure
6027 * Description: Sets the link status for the adapter
6028 */
6029
static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	/* Serialize against other netdev state changes. */
	rtnl_lock();

	/* The device may have been closed while this work was queued. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter only once, after verifying quiescence. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/*
				 * Some subsystem IDs drive the link LED via a
				 * GPIO line instead of the adapter_control
				 * LED bit.
				 */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* Read back to flush the write. */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
			}
		}
		/* Re-check: the link may have dropped while enabling. */
		val64 = readq(&bar0->adapter_status);
		if (!LINK_IS_UP(val64)) {
			DBG_PRINT(ERR_DBG, "%s:", dev->name);
			DBG_PRINT(ERR_DBG, " Link down after enabling ");
			DBG_PRINT(ERR_DBG, "device \n");
		} else
			s2io_link(nic, LINK_UP);
	} else {
		/* Link is down: turn the GPIO-driven LED off if applicable. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(0, &(nic->link_state));

out_unlock:
	rtnl_unlock();
}
6103
1ee6dd77
RB
/*
 * set_rxd_buffer_pointer - (re)attach skb/DMA buffers to one Rx descriptor.
 * @sp: device private structure.
 * @rxdp: descriptor to fill.
 * @ba: per-descriptor dummy-buffer addresses (used in 2/3 buffer modes).
 * @skb: in/out; if non-NULL the previously saved DMA addresses in
 *       @temp0..@temp2 are reused, otherwise a new skb is allocated and
 *       the new mappings are saved back into them.
 * @size: skb allocation size for the current ring mode.
 *
 * Returns 0 on success, -ENOMEM if an skb allocation fails.
 * Only descriptors whose Host_Control is 0 (no skb attached) are touched.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct sk_buff *frag_list;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
			    pci_map_single( sp->pdev, (*skb)->data,
				size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the DMA addresses saved from the last call. */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			/* Buffer-2 carries the frame payload. */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			/* Buffer-0 points at the per-descriptor header area. */
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
		}
	} else if ((rxdp->Host_Control == 0)) {
		/* Three buffer mode */
		if (*skb) {
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			/* Buffer-1 receives L3/L4 headers */
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
				pci_map_single( sp->pdev, (*skb)->data,
					l3l4hdr_size + 4,
					PCI_DMA_FROMDEVICE);
			/*
			 * skb_shinfo(skb)->frag_list will have L4
			 * data payload
			 */
			skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
								    ALIGN_SIZE);
			if (skb_shinfo(*skb)->frag_list == NULL) {
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
					  failed\n ", dev->name);
				/*
				 * NOTE(review): the skb allocated above and
				 * the two DMA mappings already made are not
				 * released on this path — looks like a leak;
				 * confirm against the caller's cleanup.
				 */
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			frag_list = skb_shinfo(*skb)->frag_list;
			frag_list->next = NULL;
			/*
			 * Buffer-2 receives L4 data payload
			 */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
				pci_map_single( sp->pdev, frag_list->data,
						dev->mtu, PCI_DMA_FROMDEVICE);
		}
	}
	return 0;
}
1ee6dd77
RB
6221static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6222 int size)
5d3213cc
AR
6223{
6224 struct net_device *dev = sp->dev;
6225 if (sp->rxd_mode == RXD_MODE_1) {
6226 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6227 } else if (sp->rxd_mode == RXD_MODE_3B) {
6228 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6229 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6230 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6231 } else {
6232 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6233 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6234 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6235 }
6236}
6237
1ee6dd77 6238static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
6239{
6240 int i, j, k, blk_cnt = 0, size;
1ee6dd77 6241 struct mac_info * mac_control = &sp->mac_control;
5d3213cc
AR
6242 struct config_param *config = &sp->config;
6243 struct net_device *dev = sp->dev;
1ee6dd77 6244 struct RxD_t *rxdp = NULL;
5d3213cc 6245 struct sk_buff *skb = NULL;
1ee6dd77 6246 struct buffAdd *ba = NULL;
5d3213cc
AR
6247 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6248
6249 /* Calculate the size based on ring mode */
6250 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6251 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6252 if (sp->rxd_mode == RXD_MODE_1)
6253 size += NET_IP_ALIGN;
6254 else if (sp->rxd_mode == RXD_MODE_3B)
6255 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6256 else
6257 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6258
6259 for (i = 0; i < config->rx_ring_num; i++) {
6260 blk_cnt = config->rx_cfg[i].num_rxd /
6261 (rxd_count[sp->rxd_mode] +1);
6262
6263 for (j = 0; j < blk_cnt; j++) {
6264 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6265 rxdp = mac_control->rings[i].
6266 rx_blocks[j].rxds[k].virt_addr;
6267 if(sp->rxd_mode >= RXD_MODE_3A)
6268 ba = &mac_control->rings[i].ba[j][k];
ac1f90d6 6269 if (set_rxd_buffer_pointer(sp, rxdp, ba,
5d3213cc
AR
6270 &skb,(u64 *)&temp0_64,
6271 (u64 *)&temp1_64,
ac1f90d6
SS
6272 (u64 *)&temp2_64,
6273 size) == ENOMEM) {
6274 return 0;
6275 }
5d3213cc
AR
6276
6277 set_rxd_buffer_size(sp, rxdp, size);
6278 wmb();
6279 /* flip the Ownership bit to Hardware */
6280 rxdp->Control_1 |= RXD_OWN_XENA;
6281 }
6282 }
6283 }
6284 return 0;
6285
6286}
6287
/*
 * s2io_add_isr - enable the configured interrupt mode and register ISRs.
 * @sp: device private structure.
 *
 * Tries MSI or MSI-X as configured, falling back to INTA if enabling
 * fails, then registers the matching handler(s).
 * Returns 0 on success, -1 on registration failure.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->intr_type == MSI)
		ret = s2io_enable_msi(sp);
	else if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* Could not enable the requested mode; fall back to INTA. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI) {
		err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
			IRQF_SHARED, sp->name, dev);
		if (err) {
			pci_disable_msi(sp->pdev);
			DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
				  dev->name);
			return -1;
		}
	}
	if (sp->intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Entry 0 is the alarm vector; data vectors start at 1. */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/*
				 * NOTE(review): vectors registered in earlier
				 * iterations are not freed here — confirm the
				 * caller performs that cleanup on failure.
				 */
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				return -1;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
/*
 * s2io_rem_isr - unregister ISRs and disable MSI/MSI-X.
 * @sp: device private structure.
 *
 * Frees every registered vector, turns the message-signalled interrupt
 * capability back off in PCI config space, then waits briefly for any
 * in-flight handlers to finish.
 */
static void s2io_rem_isr(struct s2io_nic * sp)
{
	int cnt = 0;
	struct net_device *dev = sp->dev;

	if (sp->intr_type == MSI_X) {
		int i;
		u16 msi_control;

		/* Free every vector that was successfully registered. */
		for (i=1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;

			free_irq(vector, arg);
		}
		/*
		 * Clear the enable bit in the MSI-X capability.
		 * NOTE(review): 0x42 is a hard-coded config-space offset —
		 * presumably this device's MSI-X control register; confirm
		 * against the adapter's PCI capability layout.
		 */
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);

		pci_disable_msix(sp->pdev);
	} else {
		free_irq(sp->pdev->irq, dev);
		if (sp->intr_type == MSI) {
			u16 val;

			pci_disable_msi(sp->pdev);
			/*
			 * NOTE(review): 0x4c appears to be the MSI control
			 * register for this device; the XOR toggles its
			 * enable bit — confirm against the capability layout.
			 */
			pci_read_config_word(sp->pdev, 0x4c, &val);
			val ^= 0x1;
			pci_write_config_word(sp->pdev, 0x4c, val);
		}
	}
	/* Waiting till all Interrupt handlers are complete */
	cnt = 0;
	do {
		msleep(10);
		if (!atomic_read(&sp->isr_cnt))
			break;
		cnt++;
	} while(cnt < 5);
}
6421
1ee6dd77 6422static void s2io_card_down(struct s2io_nic * sp)
e6a8fee2
AR
6423{
6424 int cnt = 0;
1ee6dd77 6425 struct XENA_dev_config __iomem *bar0 = sp->bar0;
e6a8fee2
AR
6426 unsigned long flags;
6427 register u64 val64 = 0;
6428
6429 del_timer_sync(&sp->alarm_timer);
6430 /* If s2io_set_link task is executing, wait till it completes. */
6431 while (test_and_set_bit(0, &(sp->link_state))) {
6432 msleep(50);
6433 }
6434 atomic_set(&sp->card_state, CARD_DOWN);
6435
6436 /* disable Tx and Rx traffic on the NIC */
6437 stop_nic(sp);
6438
6439 s2io_rem_isr(sp);
1da177e4
LT
6440
6441 /* Kill tasklet. */
6442 tasklet_kill(&sp->task);
6443
6444 /* Check if the device is Quiescent and then Reset the NIC */
6445 do {
5d3213cc
AR
6446 /* As per the HW requirement we need to replenish the
6447 * receive buffer to avoid the ring bump. Since there is
6448 * no intention of processing the Rx frame at this pointwe are
6449 * just settting the ownership bit of rxd in Each Rx
6450 * ring to HW and set the appropriate buffer size
6451 * based on the ring mode
6452 */
6453 rxd_owner_bit_reset(sp);
6454
1da177e4 6455 val64 = readq(&bar0->adapter_status);
19a60522
SS
6456 if (verify_xena_quiescence(sp)) {
6457 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
1da177e4
LT
6458 break;
6459 }
6460
6461 msleep(50);
6462 cnt++;
6463 if (cnt == 10) {
6464 DBG_PRINT(ERR_DBG,
6465 "s2io_close:Device not Quiescent ");
6466 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6467 (unsigned long long) val64);
6468 break;
6469 }
6470 } while (1);
1da177e4
LT
6471 s2io_reset(sp);
6472
7ba013ac
K
6473 spin_lock_irqsave(&sp->tx_lock, flags);
6474 /* Free all Tx buffers */
1da177e4 6475 free_tx_buffers(sp);
7ba013ac
K
6476 spin_unlock_irqrestore(&sp->tx_lock, flags);
6477
6478 /* Free all Rx buffers */
6479 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 6480 free_rx_buffers(sp);
7ba013ac 6481 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 6482
1da177e4
LT
6483 clear_bit(0, &(sp->link_state));
6484}
6485
1ee6dd77 6486static int s2io_card_up(struct s2io_nic * sp)
1da177e4 6487{
cc6e7c44 6488 int i, ret = 0;
1ee6dd77 6489 struct mac_info *mac_control;
1da177e4
LT
6490 struct config_param *config;
6491 struct net_device *dev = (struct net_device *) sp->dev;
e6a8fee2 6492 u16 interruptible;
1da177e4
LT
6493
6494 /* Initialize the H/W I/O registers */
6495 if (init_nic(sp) != 0) {
6496 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6497 dev->name);
e6a8fee2 6498 s2io_reset(sp);
1da177e4
LT
6499 return -ENODEV;
6500 }
6501
20346722
K
6502 /*
6503 * Initializing the Rx buffers. For now we are considering only 1
1da177e4
LT
6504 * Rx ring and initializing buffers into 30 Rx blocks
6505 */
6506 mac_control = &sp->mac_control;
6507 config = &sp->config;
6508
6509 for (i = 0; i < config->rx_ring_num; i++) {
6510 if ((ret = fill_rx_buffers(sp, i))) {
6511 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6512 dev->name);
6513 s2io_reset(sp);
6514 free_rx_buffers(sp);
6515 return -ENOMEM;
6516 }
6517 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6518 atomic_read(&sp->rx_bufs_left[i]));
6519 }
19a60522
SS
6520 /* Maintain the state prior to the open */
6521 if (sp->promisc_flg)
6522 sp->promisc_flg = 0;
6523 if (sp->m_cast_flg) {
6524 sp->m_cast_flg = 0;
6525 sp->all_multi_pos= 0;
6526 }
1da177e4
LT
6527
6528 /* Setting its receive mode */
6529 s2io_set_multicast(dev);
6530
7d3d0439 6531 if (sp->lro) {
b41477f3 6532 /* Initialize max aggregatable pkts per session based on MTU */
7d3d0439
RA
6533 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6534 /* Check if we can use(if specified) user provided value */
6535 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6536 sp->lro_max_aggr_per_sess = lro_max_pkts;
6537 }
6538
1da177e4
LT
6539 /* Enable Rx Traffic and interrupts on the NIC */
6540 if (start_nic(sp)) {
6541 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 6542 s2io_reset(sp);
e6a8fee2
AR
6543 free_rx_buffers(sp);
6544 return -ENODEV;
6545 }
6546
6547 /* Add interrupt service routine */
6548 if (s2io_add_isr(sp) != 0) {
6549 if (sp->intr_type == MSI_X)
6550 s2io_rem_isr(sp);
6551 s2io_reset(sp);
1da177e4
LT
6552 free_rx_buffers(sp);
6553 return -ENODEV;
6554 }
6555
25fff88e
K
6556 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6557
e6a8fee2
AR
6558 /* Enable tasklet for the device */
6559 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6560
6561 /* Enable select interrupts */
6562 if (sp->intr_type != INTA)
6563 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6564 else {
6565 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6566 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6567 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6568 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6569 }
6570
6571
1da177e4
LT
6572 atomic_set(&sp->card_state, CARD_UP);
6573 return 0;
6574}
6575
20346722 6576/**
1da177e4
LT
6577 * s2io_restart_nic - Resets the NIC.
6578 * @data : long pointer to the device private structure
6579 * Description:
6580 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 6581 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
6582 * the run time of the watch dog routine which is run holding a
6583 * spin lock.
6584 */
6585
c4028958 6586static void s2io_restart_nic(struct work_struct *work)
1da177e4 6587{
1ee6dd77 6588 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 6589 struct net_device *dev = sp->dev;
1da177e4 6590
22747d6b
FR
6591 rtnl_lock();
6592
6593 if (!netif_running(dev))
6594 goto out_unlock;
6595
e6a8fee2 6596 s2io_card_down(sp);
1da177e4
LT
6597 if (s2io_card_up(sp)) {
6598 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6599 dev->name);
6600 }
6601 netif_wake_queue(dev);
6602 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6603 dev->name);
22747d6b
FR
6604out_unlock:
6605 rtnl_unlock();
1da177e4
LT
6606}
6607
20346722
K
6608/**
6609 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
6610 * @dev : Pointer to net device structure
6611 * Description:
6612 * This function is triggered if the Tx Queue is stopped
6613 * for a pre-defined amount of time when the Interface is still up.
6614 * If the Interface is jammed in such a situation, the hardware is
6615 * reset (by s2io_close) and restarted again (by s2io_open) to
6616 * overcome any problem that might have been caused in the hardware.
6617 * Return value:
6618 * void
6619 */
6620
6621static void s2io_tx_watchdog(struct net_device *dev)
6622{
1ee6dd77 6623 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6624
6625 if (netif_carrier_ok(dev)) {
c53d4945 6626 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
1da177e4 6627 schedule_work(&sp->rst_timer_task);
bd1034f0 6628 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
1da177e4
LT
6629 }
6630}
6631
6632/**
6633 * rx_osm_handler - To perform some OS related operations on SKB.
6634 * @sp: private member of the device structure,pointer to s2io_nic structure.
6635 * @skb : the socket buffer pointer.
6636 * @len : length of the packet
6637 * @cksum : FCS checksum of the frame.
6638 * @ring_no : the ring from which this RxD was extracted.
20346722 6639 * Description:
b41477f3 6640 * This function is called by the Rx interrupt serivce routine to perform
1da177e4
LT
6641 * some OS related operations on the SKB before passing it to the upper
6642 * layers. It mainly checks if the checksum is OK, if so adds it to the
6643 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6644 * to the upper layer. If the checksum is wrong, it increments the Rx
6645 * packet error count, frees the SKB and returns error.
6646 * Return value:
6647 * SUCCESS on success and -1 on failure.
6648 */
1ee6dd77 6649static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
1da177e4 6650{
1ee6dd77 6651 struct s2io_nic *sp = ring_data->nic;
1da177e4 6652 struct net_device *dev = (struct net_device *) sp->dev;
20346722
K
6653 struct sk_buff *skb = (struct sk_buff *)
6654 ((unsigned long) rxdp->Host_Control);
6655 int ring_no = ring_data->ring_no;
1da177e4 6656 u16 l3_csum, l4_csum;
863c11a9 6657 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
1ee6dd77 6658 struct lro *lro;
da6971d8 6659
20346722 6660 skb->dev = dev;
c92ca04b 6661
863c11a9 6662 if (err) {
bd1034f0
AR
6663 /* Check for parity error */
6664 if (err & 0x1) {
6665 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6666 }
6667
863c11a9
AR
6668 /*
6669 * Drop the packet if bad transfer code. Exception being
6670 * 0x5, which could be due to unsupported IPv6 extension header.
6671 * In this case, we let stack handle the packet.
6672 * Note that in this case, since checksum will be incorrect,
6673 * stack will validate the same.
6674 */
6675 if (err && ((err >> 48) != 0x5)) {
6676 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6677 dev->name, err);
6678 sp->stats.rx_crc_errors++;
6679 dev_kfree_skb(skb);
6680 atomic_dec(&sp->rx_bufs_left[ring_no]);
6681 rxdp->Host_Control = 0;
6682 return 0;
6683 }
20346722 6684 }
1da177e4 6685
20346722
K
6686 /* Updating statistics */
6687 rxdp->Host_Control = 0;
20346722 6688 sp->stats.rx_packets++;
da6971d8
AR
6689 if (sp->rxd_mode == RXD_MODE_1) {
6690 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
20346722 6691
da6971d8
AR
6692 sp->stats.rx_bytes += len;
6693 skb_put(skb, len);
6694
6695 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6696 int get_block = ring_data->rx_curr_get_info.block_index;
6697 int get_off = ring_data->rx_curr_get_info.offset;
6698 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6699 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6700 unsigned char *buff = skb_push(skb, buf0_len);
6701
1ee6dd77 6702 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
da6971d8
AR
6703 sp->stats.rx_bytes += buf0_len + buf2_len;
6704 memcpy(buff, ba->ba_0, buf0_len);
6705
6706 if (sp->rxd_mode == RXD_MODE_3A) {
6707 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6708
6709 skb_put(skb, buf1_len);
6710 skb->len += buf2_len;
6711 skb->data_len += buf2_len;
da6971d8
AR
6712 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6713 sp->stats.rx_bytes += buf1_len;
6714
6715 } else
6716 skb_put(skb, buf2_len);
6717 }
20346722 6718
7d3d0439
RA
6719 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6720 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
20346722
K
6721 (sp->rx_csum)) {
6722 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
6723 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6724 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 6725 /*
1da177e4
LT
6726 * NIC verifies if the Checksum of the received
6727 * frame is Ok or not and accordingly returns
6728 * a flag in the RxD.
6729 */
6730 skb->ip_summed = CHECKSUM_UNNECESSARY;
7d3d0439
RA
6731 if (sp->lro) {
6732 u32 tcp_len;
6733 u8 *tcp;
6734 int ret = 0;
6735
6736 ret = s2io_club_tcp_session(skb->data, &tcp,
6737 &tcp_len, &lro, rxdp, sp);
6738 switch (ret) {
6739 case 3: /* Begin anew */
6740 lro->parent = skb;
6741 goto aggregate;
6742 case 1: /* Aggregate */
6743 {
6744 lro_append_pkt(sp, lro,
6745 skb, tcp_len);
6746 goto aggregate;
6747 }
6748 case 4: /* Flush session */
6749 {
6750 lro_append_pkt(sp, lro,
6751 skb, tcp_len);
6752 queue_rx_frame(lro->parent);
6753 clear_lro_session(lro);
6754 sp->mac_control.stats_info->
6755 sw_stat.flush_max_pkts++;
6756 goto aggregate;
6757 }
6758 case 2: /* Flush both */
6759 lro->parent->data_len =
6760 lro->frags_len;
6761 sp->mac_control.stats_info->
6762 sw_stat.sending_both++;
6763 queue_rx_frame(lro->parent);
6764 clear_lro_session(lro);
6765 goto send_up;
6766 case 0: /* sessions exceeded */
c92ca04b
AR
6767 case -1: /* non-TCP or not
6768 * L2 aggregatable
6769 */
7d3d0439
RA
6770 case 5: /*
6771 * First pkt in session not
6772 * L3/L4 aggregatable
6773 */
6774 break;
6775 default:
6776 DBG_PRINT(ERR_DBG,
6777 "%s: Samadhana!!\n",
6778 __FUNCTION__);
6779 BUG();
6780 }
6781 }
1da177e4 6782 } else {
20346722
K
6783 /*
6784 * Packet with erroneous checksum, let the
1da177e4
LT
6785 * upper layers deal with it.
6786 */
6787 skb->ip_summed = CHECKSUM_NONE;
6788 }
6789 } else {
6790 skb->ip_summed = CHECKSUM_NONE;
6791 }
6792
7d3d0439
RA
6793 if (!sp->lro) {
6794 skb->protocol = eth_type_trans(skb, dev);
926930b2
SS
6795 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6796 vlan_strip_flag)) {
7d3d0439 6797 /* Queueing the vlan frame to the upper layer */
db874e65
SS
6798 if (napi)
6799 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6800 RXD_GET_VLAN_TAG(rxdp->Control_2));
6801 else
6802 vlan_hwaccel_rx(skb, sp->vlgrp,
6803 RXD_GET_VLAN_TAG(rxdp->Control_2));
7d3d0439 6804 } else {
db874e65
SS
6805 if (napi)
6806 netif_receive_skb(skb);
6807 else
6808 netif_rx(skb);
7d3d0439 6809 }
7d3d0439
RA
6810 } else {
6811send_up:
6812 queue_rx_frame(skb);
6aa20a22 6813 }
1da177e4 6814 dev->last_rx = jiffies;
7d3d0439 6815aggregate:
1da177e4 6816 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
6817 return SUCCESS;
6818}
6819
6820/**
6821 * s2io_link - stops/starts the Tx queue.
6822 * @sp : private member of the device structure, which is a pointer to the
6823 * s2io_nic structure.
6824 * @link : inidicates whether link is UP/DOWN.
6825 * Description:
6826 * This function stops/starts the Tx queue depending on whether the link
20346722
K
6827 * status of the NIC is is down or up. This is called by the Alarm
6828 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
6829 * Return value:
6830 * void.
6831 */
6832
1ee6dd77 6833static void s2io_link(struct s2io_nic * sp, int link)
1da177e4
LT
6834{
6835 struct net_device *dev = (struct net_device *) sp->dev;
6836
6837 if (link != sp->last_link_state) {
6838 if (link == LINK_DOWN) {
6839 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6840 netif_carrier_off(dev);
6841 } else {
6842 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6843 netif_carrier_on(dev);
6844 }
6845 }
6846 sp->last_link_state = link;
6847}
6848
6849/**
20346722
K
6850 * get_xena_rev_id - to identify revision ID of xena.
6851 * @pdev : PCI Dev structure
6852 * Description:
6853 * Function to identify the Revision ID of xena.
6854 * Return value:
6855 * returns the revision ID of the device.
6856 */
6857
26df54bf 6858static int get_xena_rev_id(struct pci_dev *pdev)
20346722
K
6859{
6860 u8 id = 0;
6861 int ret;
6862 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6863 return id;
6864}
6865
6866/**
6867 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6868 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
6869 * s2io_nic structure.
6870 * Description:
6871 * This function initializes a few of the PCI and PCI-X configuration registers
6872 * with recommended values.
6873 * Return value:
6874 * void
6875 */
6876
1ee6dd77 6877static void s2io_init_pci(struct s2io_nic * sp)
1da177e4 6878{
20346722 6879 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
6880
6881 /* Enable Data Parity Error Recovery in PCI-X command register. */
6882 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6883 &(pcix_cmd));
1da177e4 6884 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6885 (pcix_cmd | 1));
1da177e4 6886 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6887 &(pcix_cmd));
1da177e4
LT
6888
6889 /* Set the PErr Response bit in PCI command register. */
6890 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6891 pci_write_config_word(sp->pdev, PCI_COMMAND,
6892 (pci_cmd | PCI_COMMAND_PARITY));
6893 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
6894}
6895
9dc737a7
AR
6896static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6897{
6898 if ( tx_fifo_num > 8) {
6899 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6900 "supported\n");
6901 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6902 tx_fifo_num = 8;
6903 }
6904 if ( rx_ring_num > 8) {
6905 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6906 "supported\n");
6907 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6908 rx_ring_num = 8;
6909 }
db874e65
SS
6910 if (*dev_intr_type != INTA)
6911 napi = 0;
6912
9dc737a7
AR
6913#ifndef CONFIG_PCI_MSI
6914 if (*dev_intr_type != INTA) {
6915 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6916 "MSI/MSI-X. Defaulting to INTA\n");
6917 *dev_intr_type = INTA;
6918 }
6919#else
6920 if (*dev_intr_type > MSI_X) {
6921 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6922 "Defaulting to INTA\n");
6923 *dev_intr_type = INTA;
6924 }
6925#endif
6926 if ((*dev_intr_type == MSI_X) &&
6927 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6928 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6aa20a22 6929 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
9dc737a7
AR
6930 "Defaulting to INTA\n");
6931 *dev_intr_type = INTA;
6932 }
fb6a825b 6933
9dc737a7
AR
6934 if (rx_ring_mode > 3) {
6935 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6936 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6937 rx_ring_mode = 3;
6938 }
6939 return SUCCESS;
6940}
6941
9fc93a41
SS
6942/**
6943 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6944 * or Traffic class respectively.
6945 * @nic: device peivate variable
6946 * Description: The function configures the receive steering to
6947 * desired receive ring.
6948 * Return Value: SUCCESS on success and
6949 * '-1' on failure (endian settings incorrect).
6950 */
6951static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6952{
6953 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6954 register u64 val64 = 0;
6955
6956 if (ds_codepoint > 63)
6957 return FAILURE;
6958
6959 val64 = RTS_DS_MEM_DATA(ring);
6960 writeq(val64, &bar0->rts_ds_mem_data);
6961
6962 val64 = RTS_DS_MEM_CTRL_WE |
6963 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6964 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6965
6966 writeq(val64, &bar0->rts_ds_mem_ctrl);
6967
6968 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6969 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6970 S2IO_BIT_RESET);
6971}
6972
1da177e4 6973/**
20346722 6974 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
6975 * @pdev : structure containing the PCI related information of the device.
6976 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6977 * Description:
 6978 * The function initializes an adapter identified by the pci_dev structure.
20346722
K
6979 * All OS related initialization including memory and device structure and
 6981 * initialization of the device private variable is done. Also the swapper
6981 * control register is initialized to enable read and write into the I/O
1da177e4
LT
6982 * registers of the device.
6983 * Return value:
6984 * returns 0 on success and negative on failure.
6985 */
6986
6987static int __devinit
6988s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6989{
1ee6dd77 6990 struct s2io_nic *sp;
1da177e4 6991 struct net_device *dev;
1da177e4
LT
6992 int i, j, ret;
6993 int dma_flag = FALSE;
6994 u32 mac_up, mac_down;
6995 u64 val64 = 0, tmp64 = 0;
1ee6dd77 6996 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 6997 u16 subid;
1ee6dd77 6998 struct mac_info *mac_control;
1da177e4 6999 struct config_param *config;
541ae68f 7000 int mode;
cc6e7c44 7001 u8 dev_intr_type = intr_type;
1da177e4 7002
9dc737a7
AR
7003 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7004 return ret;
1da177e4
LT
7005
7006 if ((ret = pci_enable_device(pdev))) {
7007 DBG_PRINT(ERR_DBG,
7008 "s2io_init_nic: pci_enable_device failed\n");
7009 return ret;
7010 }
7011
1e7f0bd8 7012 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1da177e4
LT
7013 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7014 dma_flag = TRUE;
1da177e4 7015 if (pci_set_consistent_dma_mask
1e7f0bd8 7016 (pdev, DMA_64BIT_MASK)) {
1da177e4
LT
7017 DBG_PRINT(ERR_DBG,
7018 "Unable to obtain 64bit DMA for \
7019 consistent allocations\n");
7020 pci_disable_device(pdev);
7021 return -ENOMEM;
7022 }
1e7f0bd8 7023 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
1da177e4
LT
7024 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7025 } else {
7026 pci_disable_device(pdev);
7027 return -ENOMEM;
7028 }
cc6e7c44
RA
7029 if (dev_intr_type != MSI_X) {
7030 if (pci_request_regions(pdev, s2io_driver_name)) {
b41477f3
AR
7031 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
7032 pci_disable_device(pdev);
cc6e7c44
RA
7033 return -ENODEV;
7034 }
7035 }
7036 else {
7037 if (!(request_mem_region(pci_resource_start(pdev, 0),
7038 pci_resource_len(pdev, 0), s2io_driver_name))) {
7039 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
7040 pci_disable_device(pdev);
7041 return -ENODEV;
7042 }
7043 if (!(request_mem_region(pci_resource_start(pdev, 2),
7044 pci_resource_len(pdev, 2), s2io_driver_name))) {
7045 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
7046 release_mem_region(pci_resource_start(pdev, 0),
7047 pci_resource_len(pdev, 0));
7048 pci_disable_device(pdev);
7049 return -ENODEV;
7050 }
1da177e4
LT
7051 }
7052
1ee6dd77 7053 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4
LT
7054 if (dev == NULL) {
7055 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7056 pci_disable_device(pdev);
7057 pci_release_regions(pdev);
7058 return -ENODEV;
7059 }
7060
7061 pci_set_master(pdev);
7062 pci_set_drvdata(pdev, dev);
7063 SET_MODULE_OWNER(dev);
7064 SET_NETDEV_DEV(dev, &pdev->dev);
7065
7066 /* Private member variable initialized to s2io NIC structure */
7067 sp = dev->priv;
1ee6dd77 7068 memset(sp, 0, sizeof(struct s2io_nic));
1da177e4
LT
7069 sp->dev = dev;
7070 sp->pdev = pdev;
1da177e4 7071 sp->high_dma_flag = dma_flag;
1da177e4 7072 sp->device_enabled_once = FALSE;
da6971d8
AR
7073 if (rx_ring_mode == 1)
7074 sp->rxd_mode = RXD_MODE_1;
7075 if (rx_ring_mode == 2)
7076 sp->rxd_mode = RXD_MODE_3B;
7077 if (rx_ring_mode == 3)
7078 sp->rxd_mode = RXD_MODE_3A;
7079
cc6e7c44 7080 sp->intr_type = dev_intr_type;
1da177e4 7081
541ae68f
K
7082 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7083 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7084 sp->device_type = XFRAME_II_DEVICE;
7085 else
7086 sp->device_type = XFRAME_I_DEVICE;
7087
7d3d0439 7088 sp->lro = lro;
6aa20a22 7089
1da177e4
LT
7090 /* Initialize some PCI/PCI-X fields of the NIC. */
7091 s2io_init_pci(sp);
7092
20346722 7093 /*
1da177e4 7094 * Setting the device configuration parameters.
20346722
K
7095 * Most of these parameters can be specified by the user during
7096 * module insertion as they are module loadable parameters. If
7097 * these parameters are not not specified during load time, they
1da177e4
LT
7098 * are initialized with default values.
7099 */
7100 mac_control = &sp->mac_control;
7101 config = &sp->config;
7102
7103 /* Tx side parameters. */
1da177e4
LT
7104 config->tx_fifo_num = tx_fifo_num;
7105 for (i = 0; i < MAX_TX_FIFOS; i++) {
7106 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7107 config->tx_cfg[i].fifo_priority = i;
7108 }
7109
20346722
K
7110 /* mapping the QoS priority to the configured fifos */
7111 for (i = 0; i < MAX_TX_FIFOS; i++)
7112 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7113
1da177e4
LT
7114 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7115 for (i = 0; i < config->tx_fifo_num; i++) {
7116 config->tx_cfg[i].f_no_snoop =
7117 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7118 if (config->tx_cfg[i].fifo_len < 65) {
7119 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7120 break;
7121 }
7122 }
fed5eccd
AR
7123 /* + 2 because one Txd for skb->data and one Txd for UFO */
7124 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7125
7126 /* Rx side parameters. */
1da177e4
LT
7127 config->rx_ring_num = rx_ring_num;
7128 for (i = 0; i < MAX_RX_RINGS; i++) {
7129 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
da6971d8 7130 (rxd_count[sp->rxd_mode] + 1);
1da177e4
LT
7131 config->rx_cfg[i].ring_priority = i;
7132 }
7133
7134 for (i = 0; i < rx_ring_num; i++) {
7135 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7136 config->rx_cfg[i].f_no_snoop =
7137 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7138 }
7139
7140 /* Setting Mac Control parameters */
7141 mac_control->rmac_pause_time = rmac_pause_time;
7142 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7143 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7144
7145
7146 /* Initialize Ring buffer parameters. */
7147 for (i = 0; i < config->rx_ring_num; i++)
7148 atomic_set(&sp->rx_bufs_left[i], 0);
7149
7ba013ac
K
7150 /* Initialize the number of ISRs currently running */
7151 atomic_set(&sp->isr_cnt, 0);
7152
1da177e4
LT
7153 /* initialize the shared memory used by the NIC and the host */
7154 if (init_shared_mem(sp)) {
7155 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
b41477f3 7156 dev->name);
1da177e4
LT
7157 ret = -ENOMEM;
7158 goto mem_alloc_failed;
7159 }
7160
7161 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7162 pci_resource_len(pdev, 0));
7163 if (!sp->bar0) {
19a60522 7164 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
7165 dev->name);
7166 ret = -ENOMEM;
7167 goto bar0_remap_failed;
7168 }
7169
7170 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7171 pci_resource_len(pdev, 2));
7172 if (!sp->bar1) {
19a60522 7173 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
7174 dev->name);
7175 ret = -ENOMEM;
7176 goto bar1_remap_failed;
7177 }
7178
7179 dev->irq = pdev->irq;
7180 dev->base_addr = (unsigned long) sp->bar0;
7181
7182 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7183 for (j = 0; j < MAX_TX_FIFOS; j++) {
1ee6dd77 7184 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
1da177e4
LT
7185 (sp->bar1 + (j * 0x00020000));
7186 }
7187
7188 /* Driver entry points */
7189 dev->open = &s2io_open;
7190 dev->stop = &s2io_close;
7191 dev->hard_start_xmit = &s2io_xmit;
7192 dev->get_stats = &s2io_get_stats;
7193 dev->set_multicast_list = &s2io_set_multicast;
7194 dev->do_ioctl = &s2io_ioctl;
7195 dev->change_mtu = &s2io_change_mtu;
7196 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02
K
7197 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7198 dev->vlan_rx_register = s2io_vlan_rx_register;
7199 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
20346722 7200
1da177e4
LT
7201 /*
7202 * will use eth_mac_addr() for dev->set_mac_address
7203 * mac address will be set every time dev->open() is called
7204 */
1da177e4 7205 dev->poll = s2io_poll;
20346722 7206 dev->weight = 32;
1da177e4 7207
612eff0e
BH
7208#ifdef CONFIG_NET_POLL_CONTROLLER
7209 dev->poll_controller = s2io_netpoll;
7210#endif
7211
1da177e4
LT
7212 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7213 if (sp->high_dma_flag == TRUE)
7214 dev->features |= NETIF_F_HIGHDMA;
1da177e4 7215 dev->features |= NETIF_F_TSO;
f83ef8c0 7216 dev->features |= NETIF_F_TSO6;
db874e65 7217 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
fed5eccd
AR
7218 dev->features |= NETIF_F_UFO;
7219 dev->features |= NETIF_F_HW_CSUM;
7220 }
1da177e4
LT
7221
7222 dev->tx_timeout = &s2io_tx_watchdog;
7223 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
7224 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7225 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 7226
e960fc5c 7227 pci_save_state(sp->pdev);
1da177e4
LT
7228
7229 /* Setting swapper control on the NIC, for proper reset operation */
7230 if (s2io_set_swapper(sp)) {
7231 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7232 dev->name);
7233 ret = -EAGAIN;
7234 goto set_swap_failed;
7235 }
7236
541ae68f
K
7237 /* Verify if the Herc works on the slot its placed into */
7238 if (sp->device_type & XFRAME_II_DEVICE) {
7239 mode = s2io_verify_pci_mode(sp);
7240 if (mode < 0) {
7241 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7242 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7243 ret = -EBADSLT;
7244 goto set_swap_failed;
7245 }
7246 }
7247
7248 /* Not needed for Herc */
7249 if (sp->device_type & XFRAME_I_DEVICE) {
7250 /*
7251 * Fix for all "FFs" MAC address problems observed on
7252 * Alpha platforms
7253 */
7254 fix_mac_address(sp);
7255 s2io_reset(sp);
7256 }
1da177e4
LT
7257
7258 /*
1da177e4
LT
7259 * MAC address initialization.
7260 * For now only one mac address will be read and used.
7261 */
7262 bar0 = sp->bar0;
7263 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7264 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7265 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 7266 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41 7267 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
1da177e4
LT
7268 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7269 mac_down = (u32) tmp64;
7270 mac_up = (u32) (tmp64 >> 32);
7271
1da177e4
LT
7272 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7273 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7274 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7275 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7276 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7277 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7278
1da177e4
LT
7279 /* Set the factory defined MAC address initially */
7280 dev->addr_len = ETH_ALEN;
7281 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7282
b41477f3
AR
7283 /* reset Nic and bring it to known state */
7284 s2io_reset(sp);
7285
1da177e4 7286 /*
20346722 7287 * Initialize the tasklet status and link state flags
541ae68f 7288 * and the card state parameter
1da177e4
LT
7289 */
7290 atomic_set(&(sp->card_state), 0);
7291 sp->tasklet_status = 0;
7292 sp->link_state = 0;
7293
1da177e4
LT
7294 /* Initialize spinlocks */
7295 spin_lock_init(&sp->tx_lock);
db874e65
SS
7296
7297 if (!napi)
7298 spin_lock_init(&sp->put_lock);
7ba013ac 7299 spin_lock_init(&sp->rx_lock);
1da177e4 7300
20346722
K
7301 /*
7302 * SXE-002: Configure link and activity LED to init state
7303 * on driver load.
1da177e4
LT
7304 */
7305 subid = sp->pdev->subsystem_device;
7306 if ((subid & 0xFF) >= 0x07) {
7307 val64 = readq(&bar0->gpio_control);
7308 val64 |= 0x0000800000000000ULL;
7309 writeq(val64, &bar0->gpio_control);
7310 val64 = 0x0411040400000000ULL;
7311 writeq(val64, (void __iomem *) bar0 + 0x2700);
7312 val64 = readq(&bar0->gpio_control);
7313 }
7314
7315 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7316
7317 if (register_netdev(dev)) {
7318 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7319 ret = -ENODEV;
7320 goto register_failed;
7321 }
9dc737a7 7322 s2io_vpd_read(sp);
0c61ed5f 7323 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
b41477f3
AR
7324 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7325 sp->product_name, get_xena_rev_id(sp->pdev));
7326 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7327 s2io_driver_version);
9dc737a7 7328 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
19a60522 7329 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
541ae68f
K
7330 sp->def_mac_addr[0].mac_addr[0],
7331 sp->def_mac_addr[0].mac_addr[1],
7332 sp->def_mac_addr[0].mac_addr[2],
7333 sp->def_mac_addr[0].mac_addr[3],
7334 sp->def_mac_addr[0].mac_addr[4],
7335 sp->def_mac_addr[0].mac_addr[5]);
19a60522 7336 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
9dc737a7 7337 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 7338 mode = s2io_print_pci_mode(sp);
541ae68f 7339 if (mode < 0) {
9dc737a7 7340 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
541ae68f 7341 ret = -EBADSLT;
9dc737a7 7342 unregister_netdev(dev);
541ae68f
K
7343 goto set_swap_failed;
7344 }
541ae68f 7345 }
9dc737a7
AR
7346 switch(sp->rxd_mode) {
7347 case RXD_MODE_1:
7348 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7349 dev->name);
7350 break;
7351 case RXD_MODE_3B:
7352 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7353 dev->name);
7354 break;
7355 case RXD_MODE_3A:
7356 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7357 dev->name);
7358 break;
7359 }
db874e65
SS
7360
7361 if (napi)
7362 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
9dc737a7
AR
7363 switch(sp->intr_type) {
7364 case INTA:
7365 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7366 break;
7367 case MSI:
7368 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7369 break;
7370 case MSI_X:
7371 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7372 break;
7373 }
7d3d0439
RA
7374 if (sp->lro)
7375 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
9dc737a7 7376 dev->name);
db874e65
SS
7377 if (ufo)
7378 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7379 " enabled\n", dev->name);
7ba013ac 7380 /* Initialize device name */
9dc737a7 7381 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7ba013ac 7382
b6e3f982
K
7383 /* Initialize bimodal Interrupts */
7384 sp->config.bimodal = bimodal;
7385 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7386 sp->config.bimodal = 0;
7387 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7388 dev->name);
7389 }
7390
20346722
K
7391 /*
7392 * Make Link state as off at this point, when the Link change
7393 * interrupt comes the state will be automatically changed to
1da177e4
LT
7394 * the right state.
7395 */
7396 netif_carrier_off(dev);
1da177e4
LT
7397
7398 return 0;
7399
7400 register_failed:
7401 set_swap_failed:
7402 iounmap(sp->bar1);
7403 bar1_remap_failed:
7404 iounmap(sp->bar0);
7405 bar0_remap_failed:
7406 mem_alloc_failed:
7407 free_shared_mem(sp);
7408 pci_disable_device(pdev);
cc6e7c44
RA
7409 if (dev_intr_type != MSI_X)
7410 pci_release_regions(pdev);
7411 else {
7412 release_mem_region(pci_resource_start(pdev, 0),
7413 pci_resource_len(pdev, 0));
7414 release_mem_region(pci_resource_start(pdev, 2),
7415 pci_resource_len(pdev, 2));
7416 }
1da177e4
LT
7417 pci_set_drvdata(pdev, NULL);
7418 free_netdev(dev);
7419
7420 return ret;
7421}
7422
7423/**
20346722 7424 * s2io_rem_nic - Free the PCI device
1da177e4 7425 * @pdev: structure containing the PCI related information of the device.
20346722 7426 * Description: This function is called by the Pci subsystem to release a
1da177e4 7427 * PCI device and free up all resource held up by the device. This could
20346722 7428 * be in response to a Hot plug event or when the driver is to be removed
1da177e4
LT
7429 * from memory.
7430 */
7431
7432static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7433{
7434 struct net_device *dev =
7435 (struct net_device *) pci_get_drvdata(pdev);
1ee6dd77 7436 struct s2io_nic *sp;
1da177e4
LT
7437
7438 if (dev == NULL) {
7439 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7440 return;
7441 }
7442
22747d6b
FR
7443 flush_scheduled_work();
7444
1da177e4
LT
7445 sp = dev->priv;
7446 unregister_netdev(dev);
7447
7448 free_shared_mem(sp);
7449 iounmap(sp->bar0);
7450 iounmap(sp->bar1);
cc6e7c44
RA
7451 if (sp->intr_type != MSI_X)
7452 pci_release_regions(pdev);
7453 else {
7454 release_mem_region(pci_resource_start(pdev, 0),
7455 pci_resource_len(pdev, 0));
7456 release_mem_region(pci_resource_start(pdev, 2),
7457 pci_resource_len(pdev, 2));
7458 }
1da177e4 7459 pci_set_drvdata(pdev, NULL);
1da177e4 7460 free_netdev(dev);
19a60522 7461 pci_disable_device(pdev);
1da177e4
LT
7462}
7463
7464/**
7465 * s2io_starter - Entry point for the driver
7466 * Description: This function is the entry point for the driver. It verifies
7467 * the module loadable parameters and initializes PCI configuration space.
7468 */
7469
7470int __init s2io_starter(void)
7471{
29917620 7472 return pci_register_driver(&s2io_driver);
1da177e4
LT
7473}
7474
7475/**
20346722 7476 * s2io_closer - Cleanup routine for the driver
1da177e4
LT
7477 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
7478 */
7479
372cc597 7480static __exit void s2io_closer(void)
1da177e4
LT
7481{
7482 pci_unregister_driver(&s2io_driver);
7483 DBG_PRINT(INIT_DBG, "cleanup done\n");
7484}
7485
7486module_init(s2io_starter);
7487module_exit(s2io_closer);
7d3d0439 7488
6aa20a22 7489static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
1ee6dd77 7490 struct tcphdr **tcp, struct RxD_t *rxdp)
7d3d0439
RA
7491{
7492 int ip_off;
7493 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7494
7495 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7496 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7497 __FUNCTION__);
7498 return -1;
7499 }
7500
7501 /* TODO:
7502 * By default the VLAN field in the MAC is stripped by the card, if this
7503 * feature is turned off in rx_pa_cfg register, then the ip_off field
7504 * has to be shifted by a further 2 bytes
7505 */
7506 switch (l2_type) {
7507 case 0: /* DIX type */
7508 case 4: /* DIX type with VLAN */
7509 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7510 break;
7511 /* LLC, SNAP etc are considered non-mergeable */
7512 default:
7513 return -1;
7514 }
7515
7516 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7517 ip_len = (u8)((*ip)->ihl);
7518 ip_len <<= 2;
7519 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7520
7521 return 0;
7522}
7523
1ee6dd77 7524static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
7525 struct tcphdr *tcp)
7526{
7527 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7528 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7529 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7530 return -1;
7531 return 0;
7532}
7533
7534static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7535{
7536 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7537}
7538
1ee6dd77 7539static void initiate_new_session(struct lro *lro, u8 *l2h,
7d3d0439
RA
7540 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7541{
7542 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7543 lro->l2h = l2h;
7544 lro->iph = ip;
7545 lro->tcph = tcp;
7546 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7547 lro->tcp_ack = ntohl(tcp->ack_seq);
7548 lro->sg_num = 1;
7549 lro->total_len = ntohs(ip->tot_len);
7550 lro->frags_len = 0;
6aa20a22 7551 /*
7d3d0439
RA
7552 * check if we saw TCP timestamp. Other consistency checks have
7553 * already been done.
7554 */
7555 if (tcp->doff == 8) {
7556 u32 *ptr;
7557 ptr = (u32 *)(tcp+1);
7558 lro->saw_ts = 1;
7559 lro->cur_tsval = *(ptr+1);
7560 lro->cur_tsecr = *(ptr+2);
7561 }
7562 lro->in_use = 1;
7563}
7564
1ee6dd77 7565static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
7566{
7567 struct iphdr *ip = lro->iph;
7568 struct tcphdr *tcp = lro->tcph;
bd4f3ae1 7569 __sum16 nchk;
1ee6dd77 7570 struct stat_block *statinfo = sp->mac_control.stats_info;
7d3d0439
RA
7571 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7572
7573 /* Update L3 header */
7574 ip->tot_len = htons(lro->total_len);
7575 ip->check = 0;
7576 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7577 ip->check = nchk;
7578
7579 /* Update L4 header */
7580 tcp->ack_seq = lro->tcp_ack;
7581 tcp->window = lro->window;
7582
7583 /* Update tsecr field if this session has timestamps enabled */
7584 if (lro->saw_ts) {
7585 u32 *ptr = (u32 *)(tcp + 1);
7586 *(ptr+2) = lro->cur_tsecr;
7587 }
7588
7589 /* Update counters required for calculation of
7590 * average no. of packets aggregated.
7591 */
7592 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7593 statinfo->sw_stat.num_aggregations++;
7594}
7595
1ee6dd77 7596static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
7597 struct tcphdr *tcp, u32 l4_pyld)
7598{
7599 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7600 lro->total_len += l4_pyld;
7601 lro->frags_len += l4_pyld;
7602 lro->tcp_next_seq += l4_pyld;
7603 lro->sg_num++;
7604
7605 /* Update ack seq no. and window ad(from this pkt) in LRO object */
7606 lro->tcp_ack = tcp->ack_seq;
7607 lro->window = tcp->window;
6aa20a22 7608
7d3d0439
RA
7609 if (lro->saw_ts) {
7610 u32 *ptr;
7611 /* Update tsecr and tsval from this packet */
7612 ptr = (u32 *) (tcp + 1);
6aa20a22 7613 lro->cur_tsval = *(ptr + 1);
7d3d0439
RA
7614 lro->cur_tsecr = *(ptr + 2);
7615 }
7616}
7617
1ee6dd77 7618static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7d3d0439
RA
7619 struct tcphdr *tcp, u32 tcp_pyld_len)
7620{
7d3d0439
RA
7621 u8 *ptr;
7622
79dc1901
AM
7623 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7624
7d3d0439
RA
7625 if (!tcp_pyld_len) {
7626 /* Runt frame or a pure ack */
7627 return -1;
7628 }
7629
7630 if (ip->ihl != 5) /* IP has options */
7631 return -1;
7632
75c30b13
AR
7633 /* If we see CE codepoint in IP header, packet is not mergeable */
7634 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7635 return -1;
7636
7637 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7d3d0439 7638 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
75c30b13 7639 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
7640 /*
7641 * Currently recognize only the ack control word and
7642 * any other control field being set would result in
7643 * flushing the LRO session
7644 */
7645 return -1;
7646 }
7647
6aa20a22 7648 /*
7d3d0439
RA
7649 * Allow only one TCP timestamp option. Don't aggregate if
7650 * any other options are detected.
7651 */
7652 if (tcp->doff != 5 && tcp->doff != 8)
7653 return -1;
7654
7655 if (tcp->doff == 8) {
6aa20a22 7656 ptr = (u8 *)(tcp + 1);
7d3d0439
RA
7657 while (*ptr == TCPOPT_NOP)
7658 ptr++;
7659 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7660 return -1;
7661
7662 /* Ensure timestamp value increases monotonically */
7663 if (l_lro)
7664 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7665 return -1;
7666
7667 /* timestamp echo reply should be non-zero */
6aa20a22 7668 if (*((u32 *)(ptr+6)) == 0)
7d3d0439
RA
7669 return -1;
7670 }
7671
7672 return 0;
7673}
7674
7675static int
1ee6dd77
RB
7676s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7677 struct RxD_t *rxdp, struct s2io_nic *sp)
7d3d0439
RA
7678{
7679 struct iphdr *ip;
7680 struct tcphdr *tcph;
7681 int ret = 0, i;
7682
7683 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7684 rxdp))) {
7685 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7686 ip->saddr, ip->daddr);
7687 } else {
7688 return ret;
7689 }
7690
7691 tcph = (struct tcphdr *)*tcp;
7692 *tcp_len = get_l4_pyld_length(ip, tcph);
7693 for (i=0; i<MAX_LRO_SESSIONS; i++) {
1ee6dd77 7694 struct lro *l_lro = &sp->lro0_n[i];
7d3d0439
RA
7695 if (l_lro->in_use) {
7696 if (check_for_socket_match(l_lro, ip, tcph))
7697 continue;
7698 /* Sock pair matched */
7699 *lro = l_lro;
7700
7701 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7702 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7703 "0x%x, actual 0x%x\n", __FUNCTION__,
7704 (*lro)->tcp_next_seq,
7705 ntohl(tcph->seq));
7706
7707 sp->mac_control.stats_info->
7708 sw_stat.outof_sequence_pkts++;
7709 ret = 2;
7710 break;
7711 }
7712
7713 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7714 ret = 1; /* Aggregate */
7715 else
7716 ret = 2; /* Flush both */
7717 break;
7718 }
7719 }
7720
7721 if (ret == 0) {
7722 /* Before searching for available LRO objects,
7723 * check if the pkt is L3/L4 aggregatable. If not
7724 * don't create new LRO session. Just send this
7725 * packet up.
7726 */
7727 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
7728 return 5;
7729 }
7730
7731 for (i=0; i<MAX_LRO_SESSIONS; i++) {
1ee6dd77 7732 struct lro *l_lro = &sp->lro0_n[i];
7d3d0439
RA
7733 if (!(l_lro->in_use)) {
7734 *lro = l_lro;
7735 ret = 3; /* Begin anew */
7736 break;
7737 }
7738 }
7739 }
7740
7741 if (ret == 0) { /* sessions exceeded */
7742 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
7743 __FUNCTION__);
7744 *lro = NULL;
7745 return ret;
7746 }
7747
7748 switch (ret) {
7749 case 3:
7750 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7751 break;
7752 case 2:
7753 update_L3L4_header(sp, *lro);
7754 break;
7755 case 1:
7756 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
7757 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7758 update_L3L4_header(sp, *lro);
7759 ret = 4; /* Flush the LRO */
7760 }
7761 break;
7762 default:
7763 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
7764 __FUNCTION__);
7765 break;
7766 }
7767
7768 return ret;
7769}
7770
1ee6dd77 7771static void clear_lro_session(struct lro *lro)
7d3d0439 7772{
1ee6dd77 7773 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
7774
7775 memset(lro, 0, lro_struct_size);
7776}
7777
7778static void queue_rx_frame(struct sk_buff *skb)
7779{
7780 struct net_device *dev = skb->dev;
7781
7782 skb->protocol = eth_type_trans(skb, dev);
db874e65
SS
7783 if (napi)
7784 netif_receive_skb(skb);
7785 else
7786 netif_rx(skb);
7d3d0439
RA
7787}
7788
1ee6dd77
RB
7789static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7790 struct sk_buff *skb,
7d3d0439
RA
7791 u32 tcp_len)
7792{
75c30b13 7793 struct sk_buff *first = lro->parent;
7d3d0439
RA
7794
7795 first->len += tcp_len;
7796 first->data_len = lro->frags_len;
7797 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
7798 if (skb_shinfo(first)->frag_list)
7799 lro->last_frag->next = skb;
7d3d0439
RA
7800 else
7801 skb_shinfo(first)->frag_list = skb;
372cc597 7802 first->truesize += skb->truesize;
75c30b13 7803 lro->last_frag = skb;
7d3d0439
RA
7804 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7805 return;
7806}