1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all the code parts that were
22 * deprecated and also for styling-related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
26 *
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 *
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines the maximum number of packets that
44 * can be aggregated as a single large packet
45 * napi: This parameter is used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable, '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
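/*
 * Usage sketch (hypothetical values, not a recommendation): loading the
 * driver with two Tx FIFOs, MSI-X interrupts and NAPI enabled would look
 * like:
 *
 *	modprobe s2io tx_fifo_num=2 intr_type=2 napi=1
 */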
54
55#include <linux/module.h>
56#include <linux/types.h>
57#include <linux/errno.h>
58#include <linux/ioport.h>
59#include <linux/pci.h>
60#include <linux/dma-mapping.h>
61#include <linux/kernel.h>
62#include <linux/netdevice.h>
63#include <linux/etherdevice.h>
64#include <linux/skbuff.h>
65#include <linux/init.h>
66#include <linux/delay.h>
67#include <linux/stddef.h>
68#include <linux/ioctl.h>
69#include <linux/timex.h>
70#include <linux/ethtool.h>
71#include <linux/workqueue.h>
72#include <linux/if_vlan.h>
73#include <linux/ip.h>
74#include <linux/tcp.h>
75#include <net/tcp.h>
76
77#include <asm/system.h>
78#include <asm/uaccess.h>
79#include <asm/io.h>
80#include <asm/div64.h>
81#include <asm/irq.h>
82
83/* local include */
84#include "s2io.h"
85#include "s2io-regs.h"
86
87#define DRV_VERSION "2.0.26.1"
88
89/* S2io Driver name & version. */
90static char s2io_driver_name[] = "Neterion";
91static char s2io_driver_version[] = DRV_VERSION;
92
93static int rxd_size[2] = {32,48};
94static int rxd_count[2] = {127,85};
95
96static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97{
98 int ret;
99
100 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103 return ret;
104}
105
106/*
107 * Cards with the following subsystem_ids have a link state indication
108 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
109 * The macro below identifies these cards given the subsystem_id.
110 */
111#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
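/*
 * For example, an Xframe I card with subid 0x600C evaluates to 1 (faulty
 * link indicator), while any Xframe II card evaluates to 0 regardless of
 * its subid.
 */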
115
116#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
119#define PANIC 1
120#define LOW 2
121static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122{
123 struct mac_info *mac_control;
124
125 mac_control = &sp->mac_control;
126 if (rxb_size <= rxd_count[sp->rxd_mode])
127 return PANIC;
128 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129 return LOW;
130 return 0;
131}
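/*
 * Illustration (using the rxd_count table above): in 1-buffer mode
 * rxd_count[0] is 127, so a ring reports PANIC once 127 or fewer buffers
 * remain, and reports LOW once the remaining count has fallen more than
 * 16 below the ring's pkt_cnt.
 */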
132
133/* Ethtool related variables and Macros. */
134static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 "Register test\t(offline)",
136 "Eeprom test\t(offline)",
137 "Link test\t(online)",
138 "RLDRAM test\t(offline)",
139 "BIST Test\t(offline)"
140};
141
142static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
143 {"tmac_frms"},
144 {"tmac_data_octets"},
145 {"tmac_drop_frms"},
146 {"tmac_mcst_frms"},
147 {"tmac_bcst_frms"},
148 {"tmac_pause_ctrl_frms"},
149 {"tmac_ttl_octets"},
150 {"tmac_ucst_frms"},
151 {"tmac_nucst_frms"},
152 {"tmac_any_err_frms"},
153 {"tmac_ttl_less_fb_octets"},
154 {"tmac_vld_ip_octets"},
155 {"tmac_vld_ip"},
156 {"tmac_drop_ip"},
157 {"tmac_icmp"},
158 {"tmac_rst_tcp"},
159 {"tmac_tcp"},
160 {"tmac_udp"},
161 {"rmac_vld_frms"},
162 {"rmac_data_octets"},
163 {"rmac_fcs_err_frms"},
164 {"rmac_drop_frms"},
165 {"rmac_vld_mcst_frms"},
166 {"rmac_vld_bcst_frms"},
167 {"rmac_in_rng_len_err_frms"},
168 {"rmac_out_rng_len_err_frms"},
169 {"rmac_long_frms"},
170 {"rmac_pause_ctrl_frms"},
171 {"rmac_unsup_ctrl_frms"},
172 {"rmac_ttl_octets"},
173 {"rmac_accepted_ucst_frms"},
174 {"rmac_accepted_nucst_frms"},
175 {"rmac_discarded_frms"},
176 {"rmac_drop_events"},
177 {"rmac_ttl_less_fb_octets"},
178 {"rmac_ttl_frms"},
179 {"rmac_usized_frms"},
180 {"rmac_osized_frms"},
181 {"rmac_frag_frms"},
182 {"rmac_jabber_frms"},
183 {"rmac_ttl_64_frms"},
184 {"rmac_ttl_65_127_frms"},
185 {"rmac_ttl_128_255_frms"},
186 {"rmac_ttl_256_511_frms"},
187 {"rmac_ttl_512_1023_frms"},
188 {"rmac_ttl_1024_1518_frms"},
189 {"rmac_ip"},
190 {"rmac_ip_octets"},
191 {"rmac_hdr_err_ip"},
192 {"rmac_drop_ip"},
193 {"rmac_icmp"},
194 {"rmac_tcp"},
195 {"rmac_udp"},
196 {"rmac_err_drp_udp"},
197 {"rmac_xgmii_err_sym"},
198 {"rmac_frms_q0"},
199 {"rmac_frms_q1"},
200 {"rmac_frms_q2"},
201 {"rmac_frms_q3"},
202 {"rmac_frms_q4"},
203 {"rmac_frms_q5"},
204 {"rmac_frms_q6"},
205 {"rmac_frms_q7"},
206 {"rmac_full_q0"},
207 {"rmac_full_q1"},
208 {"rmac_full_q2"},
209 {"rmac_full_q3"},
210 {"rmac_full_q4"},
211 {"rmac_full_q5"},
212 {"rmac_full_q6"},
213 {"rmac_full_q7"},
214 {"rmac_pause_cnt"},
215 {"rmac_xgmii_data_err_cnt"},
216 {"rmac_xgmii_ctrl_err_cnt"},
217 {"rmac_accepted_ip"},
218 {"rmac_err_tcp"},
219 {"rd_req_cnt"},
220 {"new_rd_req_cnt"},
221 {"new_rd_req_rtry_cnt"},
222 {"rd_rtry_cnt"},
223 {"wr_rtry_rd_ack_cnt"},
224 {"wr_req_cnt"},
225 {"new_wr_req_cnt"},
226 {"new_wr_req_rtry_cnt"},
227 {"wr_rtry_cnt"},
228 {"wr_disc_cnt"},
229 {"rd_rtry_wr_ack_cnt"},
230 {"txp_wr_cnt"},
231 {"txd_rd_cnt"},
232 {"txd_wr_cnt"},
233 {"rxd_rd_cnt"},
234 {"rxd_wr_cnt"},
235 {"txf_rd_cnt"},
236 {"rxf_wr_cnt"}
237};
238
239static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240 {"rmac_ttl_1519_4095_frms"},
241 {"rmac_ttl_4096_8191_frms"},
242 {"rmac_ttl_8192_max_frms"},
243 {"rmac_ttl_gt_max_frms"},
244 {"rmac_osized_alt_frms"},
245 {"rmac_jabber_alt_frms"},
246 {"rmac_gt_max_alt_frms"},
247 {"rmac_vlan_frms"},
248 {"rmac_len_discard"},
249 {"rmac_fcs_discard"},
250 {"rmac_pf_discard"},
251 {"rmac_da_discard"},
252 {"rmac_red_discard"},
253 {"rmac_rts_discard"},
254 {"rmac_ingm_full_discard"},
255 {"link_fault_cnt"}
256};
257
258static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259 {"\n DRIVER STATISTICS"},
260 {"single_bit_ecc_errs"},
261 {"double_bit_ecc_errs"},
262 {"parity_err_cnt"},
263 {"serious_err_cnt"},
264 {"soft_reset_cnt"},
265 {"fifo_full_cnt"},
266 {"ring_0_full_cnt"},
267 {"ring_1_full_cnt"},
268 {"ring_2_full_cnt"},
269 {"ring_3_full_cnt"},
270 {"ring_4_full_cnt"},
271 {"ring_5_full_cnt"},
272 {"ring_6_full_cnt"},
273 {"ring_7_full_cnt"},
274 ("alarm_transceiver_temp_high"),
275 ("alarm_transceiver_temp_low"),
276 ("alarm_laser_bias_current_high"),
277 ("alarm_laser_bias_current_low"),
278 ("alarm_laser_output_power_high"),
279 ("alarm_laser_output_power_low"),
280 ("warn_transceiver_temp_high"),
281 ("warn_transceiver_temp_low"),
282 ("warn_laser_bias_current_high"),
283 ("warn_laser_bias_current_low"),
284 ("warn_laser_output_power_high"),
285 ("warn_laser_output_power_low"),
286 ("lro_aggregated_pkts"),
287 ("lro_flush_both_count"),
288 ("lro_out_of_sequence_pkts"),
289 ("lro_flush_due_to_max_pkts"),
290 ("lro_avg_aggr_pkts"),
291 ("mem_alloc_fail_cnt"),
292 ("pci_map_fail_cnt"),
293 ("watchdog_timer_cnt"),
294 ("mem_allocated"),
295 ("mem_freed"),
296 ("link_up_cnt"),
297 ("link_down_cnt"),
298 ("link_up_time"),
299 ("link_down_time"),
300 ("tx_tcode_buf_abort_cnt"),
301 ("tx_tcode_desc_abort_cnt"),
302 ("tx_tcode_parity_err_cnt"),
303 ("tx_tcode_link_loss_cnt"),
304 ("tx_tcode_list_proc_err_cnt"),
305 ("rx_tcode_parity_err_cnt"),
306 ("rx_tcode_abort_cnt"),
307 ("rx_tcode_parity_abort_cnt"),
308 ("rx_tcode_rda_fail_cnt"),
309 ("rx_tcode_unkn_prot_cnt"),
310 ("rx_tcode_fcs_err_cnt"),
311 ("rx_tcode_buf_size_err_cnt"),
312 ("rx_tcode_rxd_corrupt_cnt"),
313 ("rx_tcode_unkn_err_cnt"),
314 {"tda_err_cnt"},
315 {"pfc_err_cnt"},
316 {"pcc_err_cnt"},
317 {"tti_err_cnt"},
318 {"tpa_err_cnt"},
319 {"sm_err_cnt"},
320 {"lso_err_cnt"},
321 {"mac_tmac_err_cnt"},
322 {"mac_rmac_err_cnt"},
323 {"xgxs_txgxs_err_cnt"},
324 {"xgxs_rxgxs_err_cnt"},
325 {"rc_err_cnt"},
326 {"prc_pcix_err_cnt"},
327 {"rpa_err_cnt"},
328 {"rda_err_cnt"},
329 {"rti_err_cnt"},
330 {"mc_err_cnt"}
331};
332
333#define S2IO_XENA_STAT_LEN (sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
334#define S2IO_ENHANCED_STAT_LEN (sizeof(ethtool_enhanced_stats_keys) / \
335 ETH_GSTRING_LEN)
336#define S2IO_DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)
337
338#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
339#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
340
341#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
342#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
343
344#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
345#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
346
347#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
348 init_timer(&timer); \
349 timer.function = handle; \
350 timer.data = (unsigned long) arg; \
351 mod_timer(&timer, (jiffies + exp))
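/*
 * Call-site sketch (hypothetical timer and handler names): the macro
 * expands to four statements, so it must not be used as the unbraced
 * body of an if/else.
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */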
352
353/* Add the vlan */
354static void s2io_vlan_rx_register(struct net_device *dev,
355 struct vlan_group *grp)
356{
357 struct s2io_nic *nic = dev->priv;
358 unsigned long flags;
359
360 spin_lock_irqsave(&nic->tx_lock, flags);
361 nic->vlgrp = grp;
362 spin_unlock_irqrestore(&nic->tx_lock, flags);
363}
364
365/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
366static int vlan_strip_flag;
367
368/*
369 * Constants to be programmed into the Xena's registers, to configure
370 * the XAUI.
371 */
372
373#define END_SIGN 0x0
374static const u64 herc_act_dtx_cfg[] = {
375 /* Set address */
376 0x8000051536750000ULL, 0x80000515367500E0ULL,
377 /* Write data */
378 0x8000051536750004ULL, 0x80000515367500E4ULL,
379 /* Set address */
380 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
381 /* Write data */
382 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
383 /* Set address */
384 0x801205150D440000ULL, 0x801205150D4400E0ULL,
385 /* Write data */
386 0x801205150D440004ULL, 0x801205150D4400E4ULL,
387 /* Set address */
388 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
389 /* Write data */
390 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
391 /* Done */
392 END_SIGN
393};
394
395static const u64 xena_dtx_cfg[] = {
396 /* Set address */
397 0x8000051500000000ULL, 0x80000515000000E0ULL,
398 /* Write data */
399 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
400 /* Set address */
401 0x8001051500000000ULL, 0x80010515000000E0ULL,
402 /* Write data */
403 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
404 /* Set address */
405 0x8002051500000000ULL, 0x80020515000000E0ULL,
406 /* Write data */
407 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
408 END_SIGN
409};
410
411/*
412 * Constants for Fixing the MacAddress problem seen mostly on
413 * Alpha machines.
414 */
415static const u64 fix_mac[] = {
416 0x0060000000000000ULL, 0x0060600000000000ULL,
417 0x0040600000000000ULL, 0x0000600000000000ULL,
418 0x0020600000000000ULL, 0x0060600000000000ULL,
419 0x0020600000000000ULL, 0x0060600000000000ULL,
420 0x0020600000000000ULL, 0x0060600000000000ULL,
421 0x0020600000000000ULL, 0x0060600000000000ULL,
422 0x0020600000000000ULL, 0x0060600000000000ULL,
423 0x0020600000000000ULL, 0x0060600000000000ULL,
424 0x0020600000000000ULL, 0x0060600000000000ULL,
425 0x0020600000000000ULL, 0x0060600000000000ULL,
426 0x0020600000000000ULL, 0x0060600000000000ULL,
427 0x0020600000000000ULL, 0x0060600000000000ULL,
428 0x0020600000000000ULL, 0x0000600000000000ULL,
429 0x0040600000000000ULL, 0x0060600000000000ULL,
430 END_SIGN
431};
432
433MODULE_LICENSE("GPL");
434MODULE_VERSION(DRV_VERSION);
435
436
437/* Module Loadable parameters. */
438S2IO_PARM_INT(tx_fifo_num, 1);
439S2IO_PARM_INT(rx_ring_num, 1);
440
441
442S2IO_PARM_INT(rx_ring_mode, 1);
443S2IO_PARM_INT(use_continuous_tx_intrs, 1);
444S2IO_PARM_INT(rmac_pause_time, 0x100);
445S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
446S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
447S2IO_PARM_INT(shared_splits, 0);
448S2IO_PARM_INT(tmac_util_period, 5);
449S2IO_PARM_INT(rmac_util_period, 5);
450S2IO_PARM_INT(bimodal, 0);
451S2IO_PARM_INT(l3l4hdr_size, 128);
452/* Frequency of Rx desc syncs expressed as power of 2 */
453S2IO_PARM_INT(rxsync_frequency, 3);
454/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
455S2IO_PARM_INT(intr_type, 2);
456/* Large receive offload feature */
457S2IO_PARM_INT(lro, 0);
458/* Max pkts to be aggregated by LRO at one time. If not specified,
459 * aggregation happens until we hit the max IP pkt size (64K)
460 */
461S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
462S2IO_PARM_INT(indicate_max_pkts, 0);
463
464S2IO_PARM_INT(napi, 1);
465S2IO_PARM_INT(ufo, 0);
466S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
467
468static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
469 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
470static unsigned int rx_ring_sz[MAX_RX_RINGS] =
471 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
472static unsigned int rts_frm_len[MAX_RX_RINGS] =
473 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
474
475module_param_array(tx_fifo_len, uint, NULL, 0);
476module_param_array(rx_ring_sz, uint, NULL, 0);
477module_param_array(rts_frm_len, uint, NULL, 0);
478
479/*
480 * S2IO device table.
481 * This table lists all the devices that this driver supports.
482 */
483static struct pci_device_id s2io_tbl[] __devinitdata = {
484 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
485 PCI_ANY_ID, PCI_ANY_ID},
486 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
487 PCI_ANY_ID, PCI_ANY_ID},
488 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
489 PCI_ANY_ID, PCI_ANY_ID},
490 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
491 PCI_ANY_ID, PCI_ANY_ID},
492 {0,}
493};
494
495MODULE_DEVICE_TABLE(pci, s2io_tbl);
496
497static struct pci_error_handlers s2io_err_handler = {
498 .error_detected = s2io_io_error_detected,
499 .slot_reset = s2io_io_slot_reset,
500 .resume = s2io_io_resume,
501};
502
503static struct pci_driver s2io_driver = {
504 .name = "S2IO",
505 .id_table = s2io_tbl,
506 .probe = s2io_init_nic,
507 .remove = __devexit_p(s2io_rem_nic),
508 .err_handler = &s2io_err_handler,
509};
510
511/* A simplifier macro used both by init and free shared_mem Fns(). */
512#define TXD_MEM_PAGE_CNT(len, per_each) ((len + per_each - 1) / per_each)
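/*
 * This is a ceiling division. Worked example (hypothetical values):
 * with a fifo_len of 100 TxDLs and 7 lists fitting per page,
 * TXD_MEM_PAGE_CNT(100, 7) = (100 + 7 - 1) / 7 = 15 pages.
 */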
513
514/**
515 * init_shared_mem - Allocation and Initialization of Memory
516 * @nic: Device private variable.
517 * Description: The function allocates all the memory areas shared
518 * between the NIC and the driver. This includes Tx descriptors,
519 * Rx descriptors and the statistics block.
520 */
521
522static int init_shared_mem(struct s2io_nic *nic)
523{
524 u32 size;
525 void *tmp_v_addr, *tmp_v_addr_next;
526 dma_addr_t tmp_p_addr, tmp_p_addr_next;
527 struct RxD_block *pre_rxd_blk = NULL;
528 int i, j, blk_cnt;
529 int lst_size, lst_per_page;
530 struct net_device *dev = nic->dev;
531 unsigned long tmp;
532 struct buffAdd *ba;
533
534 struct mac_info *mac_control;
535 struct config_param *config;
536 unsigned long long mem_allocated = 0;
537
538 mac_control = &nic->mac_control;
539 config = &nic->config;
540
541
542 /* Allocation and initialization of TXDLs in FIFOs */
543 size = 0;
544 for (i = 0; i < config->tx_fifo_num; i++) {
545 size += config->tx_cfg[i].fifo_len;
546 }
547 if (size > MAX_AVAILABLE_TXDS) {
548 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
549 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
550 return -EINVAL;
551 }
552
553 lst_size = (sizeof(struct TxD) * config->max_txds);
554 lst_per_page = PAGE_SIZE / lst_size;
555
556 for (i = 0; i < config->tx_fifo_num; i++) {
557 int fifo_len = config->tx_cfg[i].fifo_len;
558 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
559 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
560 GFP_KERNEL);
561 if (!mac_control->fifos[i].list_info) {
562 DBG_PRINT(INFO_DBG,
563 "Malloc failed for list_info\n");
564 return -ENOMEM;
565 }
566 mem_allocated += list_holder_size;
567 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
568 }
569 for (i = 0; i < config->tx_fifo_num; i++) {
570 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
571 lst_per_page);
572 mac_control->fifos[i].tx_curr_put_info.offset = 0;
573 mac_control->fifos[i].tx_curr_put_info.fifo_len =
574 config->tx_cfg[i].fifo_len - 1;
575 mac_control->fifos[i].tx_curr_get_info.offset = 0;
576 mac_control->fifos[i].tx_curr_get_info.fifo_len =
577 config->tx_cfg[i].fifo_len - 1;
578 mac_control->fifos[i].fifo_no = i;
579 mac_control->fifos[i].nic = nic;
580 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
581
582 for (j = 0; j < page_num; j++) {
583 int k = 0;
584 dma_addr_t tmp_p;
585 void *tmp_v;
586 tmp_v = pci_alloc_consistent(nic->pdev,
587 PAGE_SIZE, &tmp_p);
588 if (!tmp_v) {
589 DBG_PRINT(INFO_DBG,
590 "pci_alloc_consistent ");
591 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
592 return -ENOMEM;
593 }
594 /* If we got a zero DMA address (can happen on
595 * certain platforms like PPC), reallocate.
596 * Store the virtual address of the page we don't
597 * want, so it can be freed later.
598 */
599 if (!tmp_p) {
600 mac_control->zerodma_virt_addr = tmp_v;
601 DBG_PRINT(INIT_DBG,
602 "%s: Zero DMA address for TxDL. ", dev->name);
603 DBG_PRINT(INIT_DBG,
604 "Virtual address %p\n", tmp_v);
605 tmp_v = pci_alloc_consistent(nic->pdev,
606 PAGE_SIZE, &tmp_p);
607 if (!tmp_v) {
608 DBG_PRINT(INFO_DBG,
609 "pci_alloc_consistent ");
610 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
611 return -ENOMEM;
612 }
613 mem_allocated += PAGE_SIZE;
614 }
615 while (k < lst_per_page) {
616 int l = (j * lst_per_page) + k;
617 if (l == config->tx_cfg[i].fifo_len)
618 break;
619 mac_control->fifos[i].list_info[l].list_virt_addr =
620 tmp_v + (k * lst_size);
621 mac_control->fifos[i].list_info[l].list_phy_addr =
622 tmp_p + (k * lst_size);
623 k++;
624 }
625 }
626 }
627
628 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
629 if (!nic->ufo_in_band_v)
630 return -ENOMEM;
631 mem_allocated += (size * sizeof(u64));
632
633 /* Allocation and initialization of RXDs in Rings */
634 size = 0;
635 for (i = 0; i < config->rx_ring_num; i++) {
636 if (config->rx_cfg[i].num_rxd %
637 (rxd_count[nic->rxd_mode] + 1)) {
638 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
639 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
640 i);
641 DBG_PRINT(ERR_DBG, "RxDs per Block");
642 return FAILURE;
643 }
644 size += config->rx_cfg[i].num_rxd;
645 mac_control->rings[i].block_count =
646 config->rx_cfg[i].num_rxd /
647 (rxd_count[nic->rxd_mode] + 1 );
648 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
649 mac_control->rings[i].block_count;
650 }
651 if (nic->rxd_mode == RXD_MODE_1)
652 size = (size * (sizeof(struct RxD1)));
653 else
654 size = (size * (sizeof(struct RxD3)));
655
656 for (i = 0; i < config->rx_ring_num; i++) {
657 mac_control->rings[i].rx_curr_get_info.block_index = 0;
658 mac_control->rings[i].rx_curr_get_info.offset = 0;
659 mac_control->rings[i].rx_curr_get_info.ring_len =
660 config->rx_cfg[i].num_rxd - 1;
661 mac_control->rings[i].rx_curr_put_info.block_index = 0;
662 mac_control->rings[i].rx_curr_put_info.offset = 0;
663 mac_control->rings[i].rx_curr_put_info.ring_len =
664 config->rx_cfg[i].num_rxd - 1;
665 mac_control->rings[i].nic = nic;
666 mac_control->rings[i].ring_no = i;
667
668 blk_cnt = config->rx_cfg[i].num_rxd /
669 (rxd_count[nic->rxd_mode] + 1);
670 /* Allocating all the Rx blocks */
671 for (j = 0; j < blk_cnt; j++) {
672 struct rx_block_info *rx_blocks;
673 int l;
674
675 rx_blocks = &mac_control->rings[i].rx_blocks[j];
676 size = SIZE_OF_BLOCK; /* size is always page size */
677 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
678 &tmp_p_addr);
679 if (tmp_v_addr == NULL) {
680 /*
681 * In case of failure, free_shared_mem()
682 * is called, which should free any
683 * memory that was alloced till the
684 * failure happened.
685 */
686 rx_blocks->block_virt_addr = tmp_v_addr;
687 return -ENOMEM;
688 }
689 mem_allocated += size;
690 memset(tmp_v_addr, 0, size);
691 rx_blocks->block_virt_addr = tmp_v_addr;
692 rx_blocks->block_dma_addr = tmp_p_addr;
693 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
694 rxd_count[nic->rxd_mode],
695 GFP_KERNEL);
696 if (!rx_blocks->rxds)
697 return -ENOMEM;
698 mem_allocated +=
699 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
700 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
701 rx_blocks->rxds[l].virt_addr =
702 rx_blocks->block_virt_addr +
703 (rxd_size[nic->rxd_mode] * l);
704 rx_blocks->rxds[l].dma_addr =
705 rx_blocks->block_dma_addr +
706 (rxd_size[nic->rxd_mode] * l);
707 }
708 }
709 /* Interlinking all Rx Blocks */
710 for (j = 0; j < blk_cnt; j++) {
711 tmp_v_addr =
712 mac_control->rings[i].rx_blocks[j].block_virt_addr;
713 tmp_v_addr_next =
714 mac_control->rings[i].rx_blocks[(j + 1) %
715 blk_cnt].block_virt_addr;
716 tmp_p_addr =
717 mac_control->rings[i].rx_blocks[j].block_dma_addr;
718 tmp_p_addr_next =
719 mac_control->rings[i].rx_blocks[(j + 1) %
720 blk_cnt].block_dma_addr;
721
722 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
723 pre_rxd_blk->reserved_2_pNext_RxD_block =
724 (unsigned long) tmp_v_addr_next;
725 pre_rxd_blk->pNext_RxD_Blk_physical =
726 (u64) tmp_p_addr_next;
727 }
728 }
729 if (nic->rxd_mode == RXD_MODE_3B) {
730 /*
731 * Allocation of storage for the buffer addresses in 2BUFF mode,
732 * and of the buffers themselves.
733 */
734 for (i = 0; i < config->rx_ring_num; i++) {
735 blk_cnt = config->rx_cfg[i].num_rxd /
736 (rxd_count[nic->rxd_mode]+ 1);
737 mac_control->rings[i].ba =
738 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
739 GFP_KERNEL);
740 if (!mac_control->rings[i].ba)
741 return -ENOMEM;
742 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
743 for (j = 0; j < blk_cnt; j++) {
744 int k = 0;
745 mac_control->rings[i].ba[j] =
746 kmalloc((sizeof(struct buffAdd) *
747 (rxd_count[nic->rxd_mode] + 1)),
748 GFP_KERNEL);
749 if (!mac_control->rings[i].ba[j])
750 return -ENOMEM;
751 mem_allocated += (sizeof(struct buffAdd) * \
752 (rxd_count[nic->rxd_mode] + 1));
753 while (k != rxd_count[nic->rxd_mode]) {
754 ba = &mac_control->rings[i].ba[j][k];
755
756 ba->ba_0_org = kmalloc
757 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
758 if (!ba->ba_0_org)
759 return -ENOMEM;
760 mem_allocated +=
761 (BUF0_LEN + ALIGN_SIZE);
762 tmp = (unsigned long)ba->ba_0_org;
763 tmp += ALIGN_SIZE;
764 tmp &= ~((unsigned long) ALIGN_SIZE);
765 ba->ba_0 = (void *) tmp;
766
767 ba->ba_1_org = kmalloc
768 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
769 if (!ba->ba_1_org)
770 return -ENOMEM;
771 mem_allocated
772 += (BUF1_LEN + ALIGN_SIZE);
773 tmp = (unsigned long) ba->ba_1_org;
774 tmp += ALIGN_SIZE;
775 tmp &= ~((unsigned long) ALIGN_SIZE);
776 ba->ba_1 = (void *) tmp;
777 k++;
778 }
779 }
780 }
781 }
782
783 /* Allocation and initialization of Statistics block */
784 size = sizeof(struct stat_block);
785 mac_control->stats_mem = pci_alloc_consistent
786 (nic->pdev, size, &mac_control->stats_mem_phy);
787
788 if (!mac_control->stats_mem) {
789 /*
790 * In case of failure, free_shared_mem() is called, which
791 * should free any memory that was alloced till the
792 * failure happened.
793 */
794 return -ENOMEM;
795 }
796 mem_allocated += size;
797 mac_control->stats_mem_sz = size;
798
799 tmp_v_addr = mac_control->stats_mem;
800 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
801 memset(tmp_v_addr, 0, size);
802 DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n", dev->name,
803 (unsigned long long) tmp_p_addr);
804 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
805 return SUCCESS;
806}
807
808/**
809 * free_shared_mem - Free the allocated Memory
810 * @nic: Device private variable.
811 * Description: This function frees all memory allocated by the
812 * init_shared_mem() function and returns it to the kernel.
813 */
814
815static void free_shared_mem(struct s2io_nic *nic)
816{
817 int i, j, blk_cnt, size;
818 u32 ufo_size = 0;
819 void *tmp_v_addr;
820 dma_addr_t tmp_p_addr;
821 struct mac_info *mac_control;
822 struct config_param *config;
823 int lst_size, lst_per_page;
824 struct net_device *dev;
825 int page_num = 0;
826
827 if (!nic)
828 return;
829
830 dev = nic->dev;
831
832 mac_control = &nic->mac_control;
833 config = &nic->config;
834
835 lst_size = (sizeof(struct TxD) * config->max_txds);
836 lst_per_page = PAGE_SIZE / lst_size;
837
838 for (i = 0; i < config->tx_fifo_num; i++) {
839 ufo_size += config->tx_cfg[i].fifo_len;
840 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
841 lst_per_page);
842 for (j = 0; j < page_num; j++) {
843 int mem_blks = (j * lst_per_page);
844 if (!mac_control->fifos[i].list_info)
845 return;
846 if (!mac_control->fifos[i].list_info[mem_blks].
847 list_virt_addr)
848 break;
849 pci_free_consistent(nic->pdev, PAGE_SIZE,
850 mac_control->fifos[i].
851 list_info[mem_blks].
852 list_virt_addr,
853 mac_control->fifos[i].
854 list_info[mem_blks].
855 list_phy_addr);
856 nic->mac_control.stats_info->sw_stat.mem_freed
857 += PAGE_SIZE;
858 }
859 /* If we got a zero DMA address during allocation,
860 * free the page now
861 */
862 if (mac_control->zerodma_virt_addr) {
863 pci_free_consistent(nic->pdev, PAGE_SIZE,
864 mac_control->zerodma_virt_addr,
865 (dma_addr_t)0);
866 DBG_PRINT(INIT_DBG,
867 "%s: Freeing TxDL with zero DMA addr. ",
868 dev->name);
869 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
870 mac_control->zerodma_virt_addr);
871 nic->mac_control.stats_info->sw_stat.mem_freed
872 += PAGE_SIZE;
873 }
874 kfree(mac_control->fifos[i].list_info);
875 nic->mac_control.stats_info->sw_stat.mem_freed +=
876 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
877 }
878
879 size = SIZE_OF_BLOCK;
880 for (i = 0; i < config->rx_ring_num; i++) {
881 blk_cnt = mac_control->rings[i].block_count;
882 for (j = 0; j < blk_cnt; j++) {
883 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
884 block_virt_addr;
885 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
886 block_dma_addr;
887 if (tmp_v_addr == NULL)
888 break;
889 pci_free_consistent(nic->pdev, size,
890 tmp_v_addr, tmp_p_addr);
891 nic->mac_control.stats_info->sw_stat.mem_freed += size;
892 kfree(mac_control->rings[i].rx_blocks[j].rxds);
893 nic->mac_control.stats_info->sw_stat.mem_freed +=
894 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
895 }
896 }
897
898 if (nic->rxd_mode == RXD_MODE_3B) {
899 /* Freeing buffer storage addresses in 2BUFF mode. */
900 for (i = 0; i < config->rx_ring_num; i++) {
901 blk_cnt = config->rx_cfg[i].num_rxd /
902 (rxd_count[nic->rxd_mode] + 1);
903 for (j = 0; j < blk_cnt; j++) {
904 int k = 0;
905 if (!mac_control->rings[i].ba[j])
906 continue;
907 while (k != rxd_count[nic->rxd_mode]) {
908 struct buffAdd *ba =
909 &mac_control->rings[i].ba[j][k];
910 kfree(ba->ba_0_org);
911 nic->mac_control.stats_info->sw_stat.
912 mem_freed += (BUF0_LEN + ALIGN_SIZE);
913 kfree(ba->ba_1_org);
914 nic->mac_control.stats_info->sw_stat.
915 mem_freed += (BUF1_LEN + ALIGN_SIZE);
916 k++;
917 }
918 kfree(mac_control->rings[i].ba[j]);
919 nic->mac_control.stats_info->sw_stat.mem_freed +=
920 (sizeof(struct buffAdd) *
921 (rxd_count[nic->rxd_mode] + 1));
922 }
923 kfree(mac_control->rings[i].ba);
924 nic->mac_control.stats_info->sw_stat.mem_freed +=
925 (sizeof(struct buffAdd *) * blk_cnt);
926 }
927 }
928
929 if (mac_control->stats_mem) {
930 pci_free_consistent(nic->pdev,
931 mac_control->stats_mem_sz,
932 mac_control->stats_mem,
933 mac_control->stats_mem_phy);
934 nic->mac_control.stats_info->sw_stat.mem_freed +=
935 mac_control->stats_mem_sz;
936 }
937 if (nic->ufo_in_band_v) {
938 kfree(nic->ufo_in_band_v);
939 nic->mac_control.stats_info->sw_stat.mem_freed
940 += (ufo_size * sizeof(u64));
941 }
942}
943
944/**
945 * s2io_verify_pci_mode - Return the PCI/PCI-X mode of the device, or -1 if unknown.
946 */
947
948static int s2io_verify_pci_mode(struct s2io_nic *nic)
949{
950 struct XENA_dev_config __iomem *bar0 = nic->bar0;
951 register u64 val64 = 0;
952 int mode;
953
954 val64 = readq(&bar0->pci_mode);
955 mode = (u8)GET_PCI_MODE(val64);
956
957 if (val64 & PCI_MODE_UNKNOWN_MODE)
958 return -1; /* Unknown PCI mode */
959 return mode;
960}
961
962#define NEC_VENID 0x1033
963#define NEC_DEVID 0x0125
964static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
965{
966 struct pci_dev *tdev = NULL;
967 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
968 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID &&
969     tdev->bus == s2io_pdev->bus->parent) {
970 pci_dev_put(tdev);
971 return 1;
972 }
973 }
974 return 0;
975}
976
977static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
978/**
979 * s2io_print_pci_mode - Print the bus width and speed the device is operating at.
980 */
981static int s2io_print_pci_mode(struct s2io_nic *nic)
982{
983 struct XENA_dev_config __iomem *bar0 = nic->bar0;
984 register u64 val64 = 0;
985 int mode;
986 struct config_param *config = &nic->config;
987
988 val64 = readq(&bar0->pci_mode);
989 mode = (u8)GET_PCI_MODE(val64);
990
991 if (val64 & PCI_MODE_UNKNOWN_MODE)
992 return -1; /* Unknown PCI mode */
993
994 config->bus_speed = bus_speed[mode];
995
996 if (s2io_on_nec_bridge(nic->pdev)) {
997 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
998 nic->dev->name);
999 return mode;
1000 }
1001
1002 if (val64 & PCI_MODE_32_BITS) {
1003 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1004 } else {
1005 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1006 }
1007
1008 switch (mode) {
1009 case PCI_MODE_PCI_33:
1010 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1011 break;
1012 case PCI_MODE_PCI_66:
1013 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1014 break;
1015 case PCI_MODE_PCIX_M1_66:
1016 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1017 break;
1018 case PCI_MODE_PCIX_M1_100:
1019 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1020 break;
1021 case PCI_MODE_PCIX_M1_133:
1022 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1023 break;
1024 case PCI_MODE_PCIX_M2_66:
1025 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1026 break;
1027 case PCI_MODE_PCIX_M2_100:
1028 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1029 break;
1030 case PCI_MODE_PCIX_M2_133:
1031 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1032 break;
1033 default:
1034 return -1; /* Unsupported bus speed */
1035 }
1036
1037 return mode;
1038}
1039
1040/**
1041 * init_nic - Initialization of hardware
1042 * @nic: device private variable
1043 * Description: The function sequentially configures every block
1044 * of the H/W from their reset values.
1045 * Return Value: SUCCESS on success and
1046 * '-1' on failure (endian settings incorrect).
1047 */
1048
1049static int init_nic(struct s2io_nic *nic)
1050{
1051 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1052 struct net_device *dev = nic->dev;
1053 register u64 val64 = 0;
1054 void __iomem *add;
1055 u32 time;
1056 int i, j;
1057 struct mac_info *mac_control;
1058 struct config_param *config;
1059 int dtx_cnt = 0;
1060 unsigned long long mem_share;
1061 int mem_size;
1062
1063 mac_control = &nic->mac_control;
1064 config = &nic->config;
1065
1066 /* to set the swapper control on the card */
1067 if (s2io_set_swapper(nic)) {
1068 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1069 return -1;
1070 }
1071
1072 /*
1073 * Herc requires EOI to be removed from reset before XGXS, so do that first.
1074 */
1075 if (nic->device_type & XFRAME_II_DEVICE) {
1076 val64 = 0xA500000000ULL;
1077 writeq(val64, &bar0->sw_reset);
1078 msleep(500);
1079 val64 = readq(&bar0->sw_reset);
1080 }
1081
1082 /* Remove XGXS from reset state */
1083 val64 = 0;
1084 writeq(val64, &bar0->sw_reset);
1085 msleep(500);
1086 val64 = readq(&bar0->sw_reset);
1087
1088 /* Enable Receiving broadcasts */
1089 add = &bar0->mac_cfg;
1090 val64 = readq(&bar0->mac_cfg);
1091 val64 |= MAC_RMAC_BCAST_ENABLE;
1092 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1093 writel((u32) val64, add);
1094 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1095 writel((u32) (val64 >> 32), (add + 4));
1096
1097 /* Read registers in all blocks */
1098 val64 = readq(&bar0->mac_int_mask);
1099 val64 = readq(&bar0->mc_int_mask);
1100 val64 = readq(&bar0->xgxs_int_mask);
1101
1102 /* Set MTU */
1103 val64 = dev->mtu;
1104 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1105
1106 if (nic->device_type & XFRAME_II_DEVICE) {
1107 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1108 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1109 &bar0->dtx_control, UF);
1110 if (dtx_cnt & 0x1)
1111 msleep(1); /* Necessary!! */
1112 dtx_cnt++;
1113 }
1114 } else {
1115 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1116 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1117 &bar0->dtx_control, UF);
1118 val64 = readq(&bar0->dtx_control);
1119 dtx_cnt++;
1120 }
1121 }
1122
1123 /* Tx DMA Initialization */
1124 val64 = 0;
1125 writeq(val64, &bar0->tx_fifo_partition_0);
1126 writeq(val64, &bar0->tx_fifo_partition_1);
1127 writeq(val64, &bar0->tx_fifo_partition_2);
1128 writeq(val64, &bar0->tx_fifo_partition_3);
1129
1130
1131 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1132 val64 |=
1133 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1134 13) | vBIT(config->tx_cfg[i].fifo_priority,
1135 ((i * 32) + 5), 3);
1136
1137 if (i == (config->tx_fifo_num - 1)) {
1138 if (i % 2 == 0)
1139 i++;
1140 }
1141
1142 switch (i) {
1143 case 1:
1144 writeq(val64, &bar0->tx_fifo_partition_0);
1145 val64 = 0;
1146 break;
1147 case 3:
1148 writeq(val64, &bar0->tx_fifo_partition_1);
1149 val64 = 0;
1150 break;
1151 case 5:
1152 writeq(val64, &bar0->tx_fifo_partition_2);
1153 val64 = 0;
1154 break;
1155 case 7:
1156 writeq(val64, &bar0->tx_fifo_partition_3);
1157 break;
1158 }
1159 }
1160
1161 /*
1162 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1163 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1164 */
1165 if ((nic->device_type == XFRAME_I_DEVICE) &&
1166 (nic->pdev->revision < 4))
1167 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1168
1169 val64 = readq(&bar0->tx_fifo_partition_0);
1170 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1171 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1172
1173 /*
1174 * Initialization of Tx_PA_CONFIG register to ignore packet
1175 * integrity checking.
1176 */
1177 val64 = readq(&bar0->tx_pa_cfg);
1178 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1179 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1180 writeq(val64, &bar0->tx_pa_cfg);
1181
1182 /* Rx DMA initialization. */
1183 val64 = 0;
1184 for (i = 0; i < config->rx_ring_num; i++) {
1185 val64 |=
1186 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1187 3);
1188 }
1189 writeq(val64, &bar0->rx_queue_priority);
1190
1191 /*
1192 * Allocating equal share of memory to all the
1193 * configured Rings.
1194 */
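 /*
 * Worked example (hypothetical ring count): with mem_size = 64 and
 * three rings, Q0 is given 64 / 3 + 64 % 3 = 22 blocks while Q1 and
 * Q2 get 21 each, so the remainder is absorbed by queue 0.
 */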
1195 val64 = 0;
1196 if (nic->device_type & XFRAME_II_DEVICE)
1197 mem_size = 32;
1198 else
1199 mem_size = 64;
1200
1201 for (i = 0; i < config->rx_ring_num; i++) {
1202 switch (i) {
1203 case 0:
1204 mem_share = (mem_size / config->rx_ring_num +
1205 mem_size % config->rx_ring_num);
1206 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1207 continue;
1208 case 1:
1209 mem_share = (mem_size / config->rx_ring_num);
1210 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1211 continue;
1212 case 2:
1213 mem_share = (mem_size / config->rx_ring_num);
1214 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1215 continue;
1216 case 3:
1217 mem_share = (mem_size / config->rx_ring_num);
1218 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1219 continue;
1220 case 4:
1221 mem_share = (mem_size / config->rx_ring_num);
1222 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1223 continue;
1224 case 5:
1225 mem_share = (mem_size / config->rx_ring_num);
1226 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1227 continue;
1228 case 6:
1229 mem_share = (mem_size / config->rx_ring_num);
1230 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1231 continue;
1232 case 7:
1233 mem_share = (mem_size / config->rx_ring_num);
1234 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1235 continue;
1236 }
1237 }
1238 writeq(val64, &bar0->rx_queue_cfg);
1239
1240 /*
1241 * Filling Tx round robin registers
1242 * as per the number of FIFOs
1243 */
1244 switch (config->tx_fifo_num) {
1245 case 1:
1246 val64 = 0x0000000000000000ULL;
1247 writeq(val64, &bar0->tx_w_round_robin_0);
1248 writeq(val64, &bar0->tx_w_round_robin_1);
1249 writeq(val64, &bar0->tx_w_round_robin_2);
1250 writeq(val64, &bar0->tx_w_round_robin_3);
1251 writeq(val64, &bar0->tx_w_round_robin_4);
1252 break;
1253 case 2:
1254 val64 = 0x0000010000010000ULL;
1255 writeq(val64, &bar0->tx_w_round_robin_0);
1256 val64 = 0x0100000100000100ULL;
1257 writeq(val64, &bar0->tx_w_round_robin_1);
1258 val64 = 0x0001000001000001ULL;
1259 writeq(val64, &bar0->tx_w_round_robin_2);
1260 val64 = 0x0000010000010000ULL;
1261 writeq(val64, &bar0->tx_w_round_robin_3);
1262 val64 = 0x0100000000000000ULL;
1263 writeq(val64, &bar0->tx_w_round_robin_4);
1264 break;
1265 case 3:
1266 val64 = 0x0001000102000001ULL;
1267 writeq(val64, &bar0->tx_w_round_robin_0);
1268 val64 = 0x0001020000010001ULL;
1269 writeq(val64, &bar0->tx_w_round_robin_1);
1270 val64 = 0x0200000100010200ULL;
1271 writeq(val64, &bar0->tx_w_round_robin_2);
1272 val64 = 0x0001000102000001ULL;
1273 writeq(val64, &bar0->tx_w_round_robin_3);
1274 val64 = 0x0001020000000000ULL;
1275 writeq(val64, &bar0->tx_w_round_robin_4);
1276 break;
1277 case 4:
1278 val64 = 0x0001020300010200ULL;
1279 writeq(val64, &bar0->tx_w_round_robin_0);
1280 val64 = 0x0100000102030001ULL;
1281 writeq(val64, &bar0->tx_w_round_robin_1);
1282 val64 = 0x0200010000010203ULL;
1283 writeq(val64, &bar0->tx_w_round_robin_2);
1284 val64 = 0x0001020001000001ULL;
1285 writeq(val64, &bar0->tx_w_round_robin_3);
1286 val64 = 0x0203000100000000ULL;
1287 writeq(val64, &bar0->tx_w_round_robin_4);
1288 break;
1289 case 5:
1290 val64 = 0x0001000203000102ULL;
1291 writeq(val64, &bar0->tx_w_round_robin_0);
1292 val64 = 0x0001020001030004ULL;
1293 writeq(val64, &bar0->tx_w_round_robin_1);
1294 val64 = 0x0001000203000102ULL;
1295 writeq(val64, &bar0->tx_w_round_robin_2);
1296 val64 = 0x0001020001030004ULL;
1297 writeq(val64, &bar0->tx_w_round_robin_3);
1298 val64 = 0x0001000000000000ULL;
1299 writeq(val64, &bar0->tx_w_round_robin_4);
1300 break;
1301 case 6:
1302 val64 = 0x0001020304000102ULL;
1303 writeq(val64, &bar0->tx_w_round_robin_0);
1304 val64 = 0x0304050001020001ULL;
1305 writeq(val64, &bar0->tx_w_round_robin_1);
1306 val64 = 0x0203000100000102ULL;
1307 writeq(val64, &bar0->tx_w_round_robin_2);
1308 val64 = 0x0304000102030405ULL;
1309 writeq(val64, &bar0->tx_w_round_robin_3);
1310 val64 = 0x0001000200000000ULL;
1311 writeq(val64, &bar0->tx_w_round_robin_4);
1312 break;
1313 case 7:
1314 val64 = 0x0001020001020300ULL;
1315 writeq(val64, &bar0->tx_w_round_robin_0);
1316 val64 = 0x0102030400010203ULL;
1317 writeq(val64, &bar0->tx_w_round_robin_1);
1318 val64 = 0x0405060001020001ULL;
1319 writeq(val64, &bar0->tx_w_round_robin_2);
1320 val64 = 0x0304050000010200ULL;
1321 writeq(val64, &bar0->tx_w_round_robin_3);
1322 val64 = 0x0102030000000000ULL;
1323 writeq(val64, &bar0->tx_w_round_robin_4);
1324 break;
1325 case 8:
1326 val64 = 0x0001020300040105ULL;
1327 writeq(val64, &bar0->tx_w_round_robin_0);
1328 val64 = 0x0200030106000204ULL;
1329 writeq(val64, &bar0->tx_w_round_robin_1);
1330 val64 = 0x0103000502010007ULL;
1331 writeq(val64, &bar0->tx_w_round_robin_2);
1332 val64 = 0x0304010002060500ULL;
1333 writeq(val64, &bar0->tx_w_round_robin_3);
1334 val64 = 0x0103020400000000ULL;
1335 writeq(val64, &bar0->tx_w_round_robin_4);
1336 break;
1337 }
1338
1339 /* Enable all configured Tx FIFO partitions */
1340 val64 = readq(&bar0->tx_fifo_partition_0);
1341 val64 |= (TX_FIFO_PARTITION_EN);
1342 writeq(val64, &bar0->tx_fifo_partition_0);
1343
1344 /* Filling the Rx round robin registers as per the
1345 * number of Rings and steering based on QoS.
1346 */
1347 switch (config->rx_ring_num) {
1348 case 1:
1349 val64 = 0x8080808080808080ULL;
1350 writeq(val64, &bar0->rts_qos_steering);
1351 break;
1352 case 2:
1353 val64 = 0x0000010000010000ULL;
1354 writeq(val64, &bar0->rx_w_round_robin_0);
1355 val64 = 0x0100000100000100ULL;
1356 writeq(val64, &bar0->rx_w_round_robin_1);
1357 val64 = 0x0001000001000001ULL;
1358 writeq(val64, &bar0->rx_w_round_robin_2);
1359 val64 = 0x0000010000010000ULL;
1360 writeq(val64, &bar0->rx_w_round_robin_3);
1361 val64 = 0x0100000000000000ULL;
1362 writeq(val64, &bar0->rx_w_round_robin_4);
1363
1364 val64 = 0x8080808040404040ULL;
1365 writeq(val64, &bar0->rts_qos_steering);
1366 break;
1367 case 3:
1368 val64 = 0x0001000102000001ULL;
1369 writeq(val64, &bar0->rx_w_round_robin_0);
1370 val64 = 0x0001020000010001ULL;
1371 writeq(val64, &bar0->rx_w_round_robin_1);
1372 val64 = 0x0200000100010200ULL;
1373 writeq(val64, &bar0->rx_w_round_robin_2);
1374 val64 = 0x0001000102000001ULL;
1375 writeq(val64, &bar0->rx_w_round_robin_3);
1376 val64 = 0x0001020000000000ULL;
1377 writeq(val64, &bar0->rx_w_round_robin_4);
1378
1379 val64 = 0x8080804040402020ULL;
1380 writeq(val64, &bar0->rts_qos_steering);
1381 break;
1382 case 4:
1383 val64 = 0x0001020300010200ULL;
1384 writeq(val64, &bar0->rx_w_round_robin_0);
1385 val64 = 0x0100000102030001ULL;
1386 writeq(val64, &bar0->rx_w_round_robin_1);
1387 val64 = 0x0200010000010203ULL;
1388 writeq(val64, &bar0->rx_w_round_robin_2);
1389 val64 = 0x0001020001000001ULL;
1390 writeq(val64, &bar0->rx_w_round_robin_3);
1391 val64 = 0x0203000100000000ULL;
1392 writeq(val64, &bar0->rx_w_round_robin_4);
1393
1394 val64 = 0x8080404020201010ULL;
1395 writeq(val64, &bar0->rts_qos_steering);
1396 break;
1397 case 5:
1398 val64 = 0x0001000203000102ULL;
1399 writeq(val64, &bar0->rx_w_round_robin_0);
1400 val64 = 0x0001020001030004ULL;
1401 writeq(val64, &bar0->rx_w_round_robin_1);
1402 val64 = 0x0001000203000102ULL;
1403 writeq(val64, &bar0->rx_w_round_robin_2);
1404 val64 = 0x0001020001030004ULL;
1405 writeq(val64, &bar0->rx_w_round_robin_3);
1406 val64 = 0x0001000000000000ULL;
1407 writeq(val64, &bar0->rx_w_round_robin_4);
1408
1409 val64 = 0x8080404020201008ULL;
1410 writeq(val64, &bar0->rts_qos_steering);
1411 break;
1412 case 6:
1413 val64 = 0x0001020304000102ULL;
1414 writeq(val64, &bar0->rx_w_round_robin_0);
1415 val64 = 0x0304050001020001ULL;
1416 writeq(val64, &bar0->rx_w_round_robin_1);
1417 val64 = 0x0203000100000102ULL;
1418 writeq(val64, &bar0->rx_w_round_robin_2);
1419 val64 = 0x0304000102030405ULL;
1420 writeq(val64, &bar0->rx_w_round_robin_3);
1421 val64 = 0x0001000200000000ULL;
1422 writeq(val64, &bar0->rx_w_round_robin_4);
1423
1424 val64 = 0x8080404020100804ULL;
1425 writeq(val64, &bar0->rts_qos_steering);
1426 break;
1427 case 7:
1428 val64 = 0x0001020001020300ULL;
1429 writeq(val64, &bar0->rx_w_round_robin_0);
1430 val64 = 0x0102030400010203ULL;
1431 writeq(val64, &bar0->rx_w_round_robin_1);
1432 val64 = 0x0405060001020001ULL;
1433 writeq(val64, &bar0->rx_w_round_robin_2);
1434 val64 = 0x0304050000010200ULL;
1435 writeq(val64, &bar0->rx_w_round_robin_3);
1436 val64 = 0x0102030000000000ULL;
1437 writeq(val64, &bar0->rx_w_round_robin_4);
1438
1439 val64 = 0x8080402010080402ULL;
1440 writeq(val64, &bar0->rts_qos_steering);
1441 break;
1442 case 8:
1443 val64 = 0x0001020300040105ULL;
1444 writeq(val64, &bar0->rx_w_round_robin_0);
1445 val64 = 0x0200030106000204ULL;
1446 writeq(val64, &bar0->rx_w_round_robin_1);
1447 val64 = 0x0103000502010007ULL;
1448 writeq(val64, &bar0->rx_w_round_robin_2);
1449 val64 = 0x0304010002060500ULL;
1450 writeq(val64, &bar0->rx_w_round_robin_3);
1451 val64 = 0x0103020400000000ULL;
1452 writeq(val64, &bar0->rx_w_round_robin_4);
1453
1454 val64 = 0x8040201008040201ULL;
1455 writeq(val64, &bar0->rts_qos_steering);
1456 break;
1457 }
1458
1459 /* UDP Fix */
1460 val64 = 0;
1461 for (i = 0; i < 8; i++)
1462 writeq(val64, &bar0->rts_frm_len_n[i]);
1463
1464 /* Set the default rts frame length for the rings configured */
1465 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
1466 for (i = 0 ; i < config->rx_ring_num ; i++)
1467 writeq(val64, &bar0->rts_frm_len_n[i]);
1468
1469 /* Set the frame length for the configured rings
1470 * desired by the user
1471 */
1472 for (i = 0; i < config->rx_ring_num; i++) {
1473 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1474 * not specified frame length steering.
1475 * If the user provides a frame length then program
1476 * the rts_frm_len register with that value; otherwise
1477 * leave it as is.
1478 */
1479 if (rts_frm_len[i] != 0) {
1480 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1481 &bar0->rts_frm_len_n[i]);
1482 }
1483 }
1484
1485 /* Disable differentiated services steering logic */
1486 for (i = 0; i < 64; i++) {
1487 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1488 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1489 dev->name);
1490 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1491 return FAILURE;
1492 }
1493 }
1494
1495 /* Program statistics memory */
1496 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1497
1498 if (nic->device_type == XFRAME_II_DEVICE) {
1499 val64 = STAT_BC(0x320);
1500 writeq(val64, &bar0->stat_byte_cnt);
1501 }
1502
1503 /*
1504 * Initializing the sampling rate for the device to calculate the
1505 * bandwidth utilization.
1506 */
1507 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1508 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1509 writeq(val64, &bar0->mac_link_util);
1510
1511
1512 /*
1513 * Initializing the Transmit and Receive Traffic Interrupt
1514 * Scheme.
1515 */
1516 /*
1517 * TTI Initialization. Default Tx timer gets us about
1518 * 250 interrupts per sec. Continuous interrupts are enabled
1519 * by default.
1520 */
1521 if (nic->device_type == XFRAME_II_DEVICE) {
1522 int count = (nic->config.bus_speed * 125)/2;
1523 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1524 } else {
1525
1526 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1527 }
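 /*
 * Sketch of the arithmetic above (hypothetical bus speed): on a
 * 133 MHz PCI-X bus, count = (133 * 125) / 2 = 8312 is programmed
 * as the Tx timer value, so faster buses get proportionally larger
 * values and the interrupt rate stays roughly constant.
 */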
1528 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1529 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1530 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1531 if (use_continuous_tx_intrs)
1532 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1533 writeq(val64, &bar0->tti_data1_mem);
1534
1535 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1536 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1537 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1538 writeq(val64, &bar0->tti_data2_mem);
1539
1540 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1541 writeq(val64, &bar0->tti_command_mem);
1542
1543 /*
1544 * Once the operation completes, the Strobe bit of the command
1545 * register will be reset. We poll for this particular condition.
1546 * We wait for a maximum of 500ms for the operation to complete;
1547 * if it's not complete by then we return an error.
1548 */
1549 time = 0;
1550 while (TRUE) {
1551 val64 = readq(&bar0->tti_command_mem);
1552 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1553 break;
1554 }
1555 if (time > 10) {
1556 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1557 dev->name);
1558 return -1;
1559 }
1560 msleep(50);
1561 time++;
1562 }
1563
1564 if (nic->config.bimodal) {
1565 int k = 0;
1566 for (k = 0; k < config->rx_ring_num; k++) {
1567 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1568 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1569 writeq(val64, &bar0->tti_command_mem);
1570
1571 /*
1572 * Once the operation completes, the Strobe bit of the command
1573 * register will be reset. We poll for this particular condition.
1574 * We wait for a maximum of 500ms for the operation to complete;
1575 * if it's not complete by then we return an error.
1576 */
1577 time = 0;
1578 while (TRUE) {
1579 val64 = readq(&bar0->tti_command_mem);
1580 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1581 break;
1582 }
1583 if (time > 10) {
1584 DBG_PRINT(ERR_DBG,
1585 "%s: TTI init Failed\n",
1586 dev->name);
1587 return -1;
1588 }
1589 time++;
1590 msleep(50);
1591 }
1592 }
1593 } else {
1594
1595 /* RTI Initialization */
1596 if (nic->device_type == XFRAME_II_DEVICE) {
1597 /*
1598 * Programmed to generate approximately 500
1599 * interrupts per second.
1600 */
1601 int count = (nic->config.bus_speed * 125)/4;
1602 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1603 } else {
1604 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1605 }
1606 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1607 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1608 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1609
1610 writeq(val64, &bar0->rti_data1_mem);
1611
1612 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1613 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1614 if (nic->intr_type == MSI_X)
1615 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1616 RTI_DATA2_MEM_RX_UFC_D(0x40));
1617 else
1618 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1619 RTI_DATA2_MEM_RX_UFC_D(0x80));
1620 writeq(val64, &bar0->rti_data2_mem);
1621
1622 for (i = 0; i < config->rx_ring_num; i++) {
1623 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1624 | RTI_CMD_MEM_OFFSET(i);
1625 writeq(val64, &bar0->rti_command_mem);
1626
1627 /*
1628 * Once the operation completes, the Strobe bit of the
1629 * command register will be reset. We poll for this
1630 * particular condition. We wait for a maximum of 500ms
1631 * for the operation to complete; if it's not complete
1632 * by then we return an error.
1633 */
1634 time = 0;
1635 while (TRUE) {
1636 val64 = readq(&bar0->rti_command_mem);
1637 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1638 break;
1639 }
1640 if (time > 10) {
1641 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1642 dev->name);
1643 return -1;
1644 }
1645 time++;
1646 msleep(50);
1647 }
1648 }
1649 }
1650
1651 /*
1652 * Initializing proper values as the pause threshold for all
1653 * of the 8 queues on the Rx side.
1654 */
1655 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1656 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1657
1658 /* Disable RMAC PAD STRIPPING */
1659 add = &bar0->mac_cfg;
1660 val64 = readq(&bar0->mac_cfg);
1661 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1662 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1663 writel((u32) (val64), add);
1664 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1665 writel((u32) (val64 >> 32), (add + 4));
1666 val64 = readq(&bar0->mac_cfg);
1667
1668 /* Enable FCS stripping by adapter */
1669 add = &bar0->mac_cfg;
1670 val64 = readq(&bar0->mac_cfg);
1671 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1672 if (nic->device_type == XFRAME_II_DEVICE)
1673 writeq(val64, &bar0->mac_cfg);
1674 else {
1675 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1676 writel((u32) (val64), add);
1677 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1678 writel((u32) (val64 >> 32), (add + 4));
1679 }
1680
1681 /*
1682 * Set the time value to be inserted in the pause frame
1683 * generated by xena.
1684 */
1685 val64 = readq(&bar0->rmac_pause_cfg);
1686 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1687 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1688 writeq(val64, &bar0->rmac_pause_cfg);
1689
1690 /*
1691 * Set the threshold limit for generating pause frames.
1692 * If the amount of data in any queue exceeds the ratio
1693 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1694 * a pause frame is generated.
1695 */
1696 val64 = 0;
1697 for (i = 0; i < 4; i++) {
1698 val64 |=
1699 (((u64) 0xFF00 | nic->mac_control.
1700 mc_pause_threshold_q0q3)
1701 << (i * 2 * 8));
1702 }
1703 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1704
1705 val64 = 0;
1706 for (i = 0; i < 4; i++) {
1707 val64 |=
1708 (((u64) 0xFF00 | nic->mac_control.
1709 mc_pause_threshold_q4q7)
1710 << (i * 2 * 8));
1711 }
1712 writeq(val64, &bar0->mc_pause_thresh_q4q7);
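 /*
 * Worked example with the default module parameter: 187 decimal is
 * 0xBB, so each 16-bit field becomes 0xFF00 | 0xBB = 0xFFBB and the
 * register is programmed with 0xFFBBFFBBFFBBFFBB, matching the
 * initial value written above.
 */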
1713
1714 /*
1715 * TxDMA will stop issuing read requests if the number of read
1716 * splits exceeds the limit set by shared_splits.
1717 */
1718 val64 = readq(&bar0->pic_control);
1719 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1720 writeq(val64, &bar0->pic_control);
1721
1722 if (nic->config.bus_speed == 266) {
1723 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1724 writeq(0x0, &bar0->read_retry_delay);
1725 writeq(0x0, &bar0->write_retry_delay);
1726 }
1727
1728 /*
1729 * Programming the Herc to split every write transaction
1730 * that does not start on an ADB to reduce disconnects.
1731 */
1732 if (nic->device_type == XFRAME_II_DEVICE) {
1733 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1734 MISC_LINK_STABILITY_PRD(3);
1735 writeq(val64, &bar0->misc_control);
1736 val64 = readq(&bar0->pic_control2);
1737 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1738 writeq(val64, &bar0->pic_control2);
1739 }
1740 if (strstr(nic->product_name, "CX4")) {
1741 val64 = TMAC_AVG_IPG(0x17);
1742 writeq(val64, &bar0->tmac_avg_ipg);
1743 }
1744
1745 return SUCCESS;
1746}
1747#define LINK_UP_DOWN_INTERRUPT 1
1748#define MAC_RMAC_ERR_TIMER 2
1749
1750static int s2io_link_fault_indication(struct s2io_nic *nic)
1751{
1752 if (nic->intr_type != INTA)
1753 return MAC_RMAC_ERR_TIMER;
1754 if (nic->device_type == XFRAME_II_DEVICE)
1755 return LINK_UP_DOWN_INTERRUPT;
1756 else
1757 return MAC_RMAC_ERR_TIMER;
1758}
1759
1760/**
1761 * do_s2io_write_bits - update alarm bits in alarm register
1762 * @value: alarm bits
1763 * @flag: ENABLE_INTRS to clear the bits (unmask), else set them (mask)
1764 * @addr: address of the alarm mask register
1765 * Description: update alarm bits in the alarm mask register
1766 * Return Value:
1767 * NONE.
1768 */
1769static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1770{
1771 u64 temp64;
1772
1773 temp64 = readq(addr);
1774
1775 if (flag == ENABLE_INTRS)
1776 temp64 &= ~((u64) value);
1777 else
1778 temp64 |= ((u64) value);
1779 writeq(temp64, addr);
1780}
1781
1782void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1783{
1784 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1785 register u64 gen_int_mask = 0;
1786
1787 if (mask & TX_DMA_INTR) {
1788
1789 gen_int_mask |= TXDMA_INT_M;
1790
1791 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1792 TXDMA_PCC_INT | TXDMA_TTI_INT |
1793 TXDMA_LSO_INT | TXDMA_TPA_INT |
1794 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1795
1796 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1797 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1798 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1799 &bar0->pfc_err_mask);
1800
1801 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1802 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1803 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1804
1805 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1806 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1807 PCC_N_SERR | PCC_6_COF_OV_ERR |
1808 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1809 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1810 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1811
1812 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1813 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1814
1815 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1816 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1817 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1818 flag, &bar0->lso_err_mask);
1819
1820 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1821 flag, &bar0->tpa_err_mask);
1822
1823 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1824
1825 }
1826
1827 if (mask & TX_MAC_INTR) {
1828 gen_int_mask |= TXMAC_INT_M;
1829 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1830 &bar0->mac_int_mask);
1831 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1832 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1833 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1834 flag, &bar0->mac_tmac_err_mask);
1835 }
1836
1837 if (mask & TX_XGXS_INTR) {
1838 gen_int_mask |= TXXGXS_INT_M;
1839 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1840 &bar0->xgxs_int_mask);
1841 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1842 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1843 flag, &bar0->xgxs_txgxs_err_mask);
1844 }
1845
1846 if (mask & RX_DMA_INTR) {
1847 gen_int_mask |= RXDMA_INT_M;
1848 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1849 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1850 flag, &bar0->rxdma_int_mask);
1851 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1852 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1853 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1854 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1855 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1856 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1857 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1858 &bar0->prc_pcix_err_mask);
1859 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1860 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1861 &bar0->rpa_err_mask);
1862 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1863 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1864 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1865 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
1866 flag, &bar0->rda_err_mask);
1867 do_s2io_write_bits(RTI_SM_ERR_ALARM |
1868 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1869 flag, &bar0->rti_err_mask);
1870 }
1871
1872 if (mask & RX_MAC_INTR) {
1873 gen_int_mask |= RXMAC_INT_M;
1874 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1875 &bar0->mac_int_mask);
1876 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1877 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1878 RMAC_DOUBLE_ECC_ERR |
1879 RMAC_LINK_STATE_CHANGE_INT,
1880 flag, &bar0->mac_rmac_err_mask);
1881 }
1882
1883 if (mask & RX_XGXS_INTR) {
1885 gen_int_mask |= RXXGXS_INT_M;
1886 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1887 &bar0->xgxs_int_mask);
1888 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1889 &bar0->xgxs_rxgxs_err_mask);
1890 }
1891
1892 if (mask & MC_INTR) {
1893 gen_int_mask |= MC_INT_M;
1894 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
1895 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1896 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1897 &bar0->mc_err_mask);
1898 }
1899 nic->general_int_mask = gen_int_mask;
1900
1901 /* Remove this line when alarm interrupts are enabled */
1902 nic->general_int_mask = 0;
1903}
1904/**
1905 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1906 * @nic: device private variable,
1907 * @mask: A mask indicating which Intr block must be modified,
1908 * @flag: A flag indicating whether to enable or disable the Intrs.
1909 * Description: This function will either disable or enable the interrupts
1910 * depending on the flag argument. The mask argument can be used to
1911 * enable/disable any Intr block.
1912 * Return Value: NONE.
1913 */
1914
1915static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1916{
1917 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1918 register u64 temp64 = 0, intr_mask = 0;
1919
1920 intr_mask = nic->general_int_mask;
1921
1922 /* Top level interrupt classification */
1923 /* PIC Interrupts */
1924 if (mask & TX_PIC_INTR) {
1925 /* Enable PIC Intrs in the general intr mask register */
1926 intr_mask |= TXPIC_INT_M;
1927 if (flag == ENABLE_INTRS) {
1928 /*
1929 * If Hercules adapter enable GPIO otherwise
1930 * disable all PCIX, Flash, MDIO, IIC and GPIO
1931 * interrupts for now.
1932 * TODO
1933 */
1934 if (s2io_link_fault_indication(nic) ==
1935 LINK_UP_DOWN_INTERRUPT ) {
1936 do_s2io_write_bits(PIC_INT_GPIO, flag,
1937 &bar0->pic_int_mask);
1938 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
1939 &bar0->gpio_int_mask);
1940 } else
1941 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1942 } else if (flag == DISABLE_INTRS) {
1943 /*
1944 * Disable PIC Intrs in the general
1945 * intr mask register
1946 */
1947 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1948 }
1949 }
1950
1951 /* Tx traffic interrupts */
1952 if (mask & TX_TRAFFIC_INTR) {
1953 intr_mask |= TXTRAFFIC_INT_M;
1954 if (flag == ENABLE_INTRS) {
1955 /*
1956 * Enable all the Tx side interrupts
1957 * writing 0 Enables all 64 TX interrupt levels
1958 */
1959 writeq(0x0, &bar0->tx_traffic_mask);
1960 } else if (flag == DISABLE_INTRS) {
1961 /*
1962 * Disable Tx Traffic Intrs in the general intr mask
1963 * register.
1964 */
1965 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1966 }
1967 }
1968
1969 /* Rx traffic interrupts */
1970 if (mask & RX_TRAFFIC_INTR) {
1971 intr_mask |= RXTRAFFIC_INT_M;
1972 if (flag == ENABLE_INTRS) {
1973 /* writing 0 Enables all 8 RX interrupt levels */
1974 writeq(0x0, &bar0->rx_traffic_mask);
1975 } else if (flag == DISABLE_INTRS) {
1976 /*
1977 * Disable Rx Traffic Intrs in the general intr mask
1978 * register.
1979 */
1980 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1981 }
1982 }
1983
1984 temp64 = readq(&bar0->general_int_mask);
1985 if (flag == ENABLE_INTRS)
1986 temp64 &= ~((u64) intr_mask);
1987 else
1988 temp64 = DISABLE_ALL_INTRS;
1989 writeq(temp64, &bar0->general_int_mask);
1990
1991 nic->general_int_mask = readq(&bar0->general_int_mask);
1992}
1993
1994/**
1995 * verify_pcc_quiescent - Checks for PCC quiescent state
1996 * Return: 1 if the PCC is quiescent
1997 * 0 if the PCC is not quiescent
1998 */
1999static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2000{
2001 int ret = 0, herc;
2002 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2003 u64 val64 = readq(&bar0->adapter_status);
2004
2005 herc = (sp->device_type == XFRAME_II_DEVICE);
2006
2007 if (flag == FALSE) {
2008 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2009 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2010 ret = 1;
2011 } else {
2012 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2013 ret = 1;
2014 }
2015 } else {
2016 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2017 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2018 ADAPTER_STATUS_RMAC_PCC_IDLE))
2019 ret = 1;
2020 } else {
2021 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2022 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2023 ret = 1;
2024 }
2025 }
2026
2027 return ret;
2028}
2029/**
2030 * verify_xena_quiescence - Checks whether the H/W is ready
2031 * Description: Returns whether the H/W is ready to go or not by
2032 * checking the individual readiness bits reported in the
2033 * adapter_status register; every block must report a ready or
2034 * quiescent state for the check to pass.
2035 * Return: 1 if Xena is quiescent
2036 * 0 if Xena is not quiescent
2037 */
2038
2039static int verify_xena_quiescence(struct s2io_nic *sp)
2040{
2041 int mode;
2042 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2043 u64 val64 = readq(&bar0->adapter_status);
2044 mode = s2io_verify_pci_mode(sp);
2045
2046 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2047 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2048 return 0;
2049 }
2050 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2051 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2052 return 0;
2053 }
2054 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2055 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2056 return 0;
2057 }
2058 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2059 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2060 return 0;
2061 }
2062 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2063 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2064 return 0;
2065 }
2066 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2067 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2068 return 0;
2069 }
2070 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2071 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2072 return 0;
2073 }
2074 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2075 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2076 return 0;
2077 }
2078
2079 /*
2080 * In PCI 33 mode, the P_PLL is not used, and therefore,
2081 * the P_PLL_LOCK bit in the adapter_status register will
2082 * not be asserted.
2083 */
2084 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2085 sp->device_type == XFRAME_II_DEVICE && mode !=
2086 PCI_MODE_PCI_33) {
2087 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2088 return 0;
2089 }
2090 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2091 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2092 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2093 return 0;
2094 }
2095 return 1;
2096}
2097
2098/**
2099 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2100 * @sp: Pointer to device specific structure
2101 * Description :
2102 * New procedure to clear mac address reading problems on Alpha platforms
2103 *
2104 */
2105
2106static void fix_mac_address(struct s2io_nic * sp)
2107{
2108 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2109 u64 val64;
2110 int i = 0;
2111
2112 while (fix_mac[i] != END_SIGN) {
2113 writeq(fix_mac[i++], &bar0->gpio_control);
2114 udelay(10);
2115 val64 = readq(&bar0->gpio_control);
2116 }
2117}
2118
2119/**
2120 * start_nic - Turns the device on
2121 * @nic : device private variable.
2122 * Description:
2123 * This function actually turns the device on. Before this function is
2124 * called, all registers are configured from their reset states
2125 * and shared memory is allocated but the NIC is still quiescent. On
2126 * calling this function, the device interrupts are cleared and the NIC is
2127 * literally switched on by writing into the adapter control register.
2128 * Return Value:
2129 * SUCCESS on success and -1 on failure.
2130 */
2131
2132static int start_nic(struct s2io_nic *nic)
2133{
2134 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2135 struct net_device *dev = nic->dev;
2136 register u64 val64 = 0;
2137 u16 subid, i;
2138 struct mac_info *mac_control;
2139 struct config_param *config;
2140
2141 mac_control = &nic->mac_control;
2142 config = &nic->config;
2143
2144 /* PRC Initialization and configuration */
2145 for (i = 0; i < config->rx_ring_num; i++) {
2146 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2147 &bar0->prc_rxd0_n[i]);
2148
2149 val64 = readq(&bar0->prc_ctrl_n[i]);
2150 if (nic->config.bimodal)
2151 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
2152 if (nic->rxd_mode == RXD_MODE_1)
2153 val64 |= PRC_CTRL_RC_ENABLED;
2154 else
2155 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2156 if (nic->device_type == XFRAME_II_DEVICE)
2157 val64 |= PRC_CTRL_GROUP_READS;
2158 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2159 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2160 writeq(val64, &bar0->prc_ctrl_n[i]);
2161 }
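 /*
 * Editorial note: the backoff interval update above is a
 * read-modify-write of a bit field. The whole field (presumably
 * 24 bits wide, given the 0xFFFFFF mask) is cleared first, then
 * the new value 0x1000 is ORed in, so no stale bits of the old
 * interval can leak into the new one.
 */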
2162
2163 if (nic->rxd_mode == RXD_MODE_3B) {
2164 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2165 val64 = readq(&bar0->rx_pa_cfg);
2166 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2167 writeq(val64, &bar0->rx_pa_cfg);
2168 }
2169
2170 if (vlan_tag_strip == 0) {
2171 val64 = readq(&bar0->rx_pa_cfg);
2172 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2173 writeq(val64, &bar0->rx_pa_cfg);
2174 vlan_strip_flag = 0;
2175 }
2176
2177 /*
2178 * Enabling MC-RLDRAM. After enabling the device, we wait
2179 * around 100ms, which is approximately the time required
2180 * for the device to be ready for operation.
2181 */
2182 val64 = readq(&bar0->mc_rldram_mrs);
2183 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2184 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2185 val64 = readq(&bar0->mc_rldram_mrs);
2186
2187 msleep(100); /* Delay by around 100 ms. */
2188
2189 /* Enabling ECC Protection. */
2190 val64 = readq(&bar0->adapter_control);
2191 val64 &= ~ADAPTER_ECC_EN;
2192 writeq(val64, &bar0->adapter_control);
2193
2194 /*
2195 * Verify if the device is ready to be enabled, if so enable
2196 * it.
2197 */
2198 val64 = readq(&bar0->adapter_status);
2199 if (!verify_xena_quiescence(nic)) {
2200 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2201 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2202 (unsigned long long) val64);
2203 return FAILURE;
2204 }
2205
2206 /*
2207 * With some switches, link might be already up at this point.
2208 * Because of this weird behavior, when we enable laser,
2209 * we may not get link. We need to handle this. We cannot
2210 * figure out which switch is misbehaving. So we are forced to
2211 * make a global change.
2212 */
2213
2214 /* Enabling Laser. */
2215 val64 = readq(&bar0->adapter_control);
2216 val64 |= ADAPTER_EOI_TX_ON;
2217 writeq(val64, &bar0->adapter_control);
2218
2219 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2220 /*
2221 * We don't see link state interrupts initially on some switches,
2222 * so schedule the link state task here directly.
2223 */
2224 schedule_work(&nic->set_link_task);
2225 }
2226 /* SXE-002: Initialize link and activity LED */
2227 subid = nic->pdev->subsystem_device;
2228 if (((subid & 0xFF) >= 0x07) &&
2229 (nic->device_type == XFRAME_I_DEVICE)) {
2230 val64 = readq(&bar0->gpio_control);
2231 val64 |= 0x0000800000000000ULL;
2232 writeq(val64, &bar0->gpio_control);
2233 val64 = 0x0411040400000000ULL;
2234 writeq(val64, (void __iomem *)bar0 + 0x2700);
2235 }
2236
2237 return SUCCESS;
2238}
2239/**
2240 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2241 */
2242static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2243 struct TxD *txdlp, int get_off)
2244{
2245 struct s2io_nic *nic = fifo_data->nic;
2246 struct sk_buff *skb;
2247 struct TxD *txds;
2248 u16 j, frg_cnt;
2249
2250 txds = txdlp;
2251 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2252 pci_unmap_single(nic->pdev, (dma_addr_t)
2253 txds->Buffer_Pointer, sizeof(u64),
2254 PCI_DMA_TODEVICE);
2255 txds++;
2256 }
2257
2258 skb = (struct sk_buff *) ((unsigned long)
2259 txds->Host_Control);
2260 if (!skb) {
2261 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2262 return NULL;
2263 }
2264 pci_unmap_single(nic->pdev, (dma_addr_t)
2265 txds->Buffer_Pointer,
2266 skb->len - skb->data_len,
2267 PCI_DMA_TODEVICE);
2268 frg_cnt = skb_shinfo(skb)->nr_frags;
2269 if (frg_cnt) {
2270 txds++;
2271 for (j = 0; j < frg_cnt; j++, txds++) {
2272 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2273 if (!txds->Buffer_Pointer)
2274 break;
2275 pci_unmap_page(nic->pdev, (dma_addr_t)
2276 txds->Buffer_Pointer,
2277 frag->size, PCI_DMA_TODEVICE);
2278 }
2279 }
2280 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2281 return skb;
2282}
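/*
 * Editorial note: when UFO is in use, the first TxD of a list carries the
 * in-band UFO header (Host_Control == ufo_in_band_v). That is why the
 * function above unmaps one extra sizeof(u64) buffer and advances txds
 * before looking up the skb pointer.
 */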
2283
2284/**
2285 * free_tx_buffers - Free all queued Tx buffers
2286 * @nic : device private variable.
2287 * Description:
2288 * Free all queued Tx buffers.
2289 * Return Value: void
2290*/
2291
2292static void free_tx_buffers(struct s2io_nic *nic)
2293{
2294 struct net_device *dev = nic->dev;
2295 struct sk_buff *skb;
2296 struct TxD *txdp;
2297 int i, j;
2298 struct mac_info *mac_control;
2299 struct config_param *config;
2300 int cnt = 0;
2301
2302 mac_control = &nic->mac_control;
2303 config = &nic->config;
2304
2305 for (i = 0; i < config->tx_fifo_num; i++) {
2306 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2307 txdp = (struct TxD *)
2308 mac_control->fifos[i].list_info[j].list_virt_addr;
2309 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2310 if (skb) {
2311 nic->mac_control.stats_info->sw_stat.mem_freed
2312 += skb->truesize;
2313 dev_kfree_skb(skb);
2314 cnt++;
2315 }
2316 }
2317 DBG_PRINT(INTR_DBG,
2318 "%s:forcibly freeing %d skbs on FIFO%d\n",
2319 dev->name, cnt, i);
2320 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2321 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2322 }
2323}
2324
2325/**
2326 * stop_nic - To stop the nic
2327 * @nic : device private variable.
2328 * Description:
2329 * This function does exactly the opposite of what the start_nic()
2330 * function does. This function is called to stop the device.
2331 * Return Value:
2332 * void.
2333 */
2334
2335static void stop_nic(struct s2io_nic *nic)
2336{
2337 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2338 register u64 val64 = 0;
2339 u16 interruptible;
2340 struct mac_info *mac_control;
2341 struct config_param *config;
2342
2343 mac_control = &nic->mac_control;
2344 config = &nic->config;
2345
2346 /* Disable all interrupts */
2347 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2348 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2349 interruptible |= TX_PIC_INTR;
2350 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2351
2352 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2353 val64 = readq(&bar0->adapter_control);
2354 val64 &= ~(ADAPTER_CNTL_EN);
2355 writeq(val64, &bar0->adapter_control);
2356}
2357
2358/**
2359 * fill_rx_buffers - Allocates the Rx side skbs
2360 * @nic: device private variable
2361 * @ring_no: ring number
2362 * Description:
2363 * The function allocates Rx side skbs and puts the physical
2364 * address of these buffers into the RxD buffer pointers, so that the NIC
2365 * can DMA the received frame into these locations.
2366 * The NIC supports 3 receive modes, viz
2367 * 1. single buffer,
2368 * 2. three buffer and
2369 * 3. five buffer modes.
2370 * Each mode defines how many fragments the received frame will be split
2371 * up into by the NIC. The frame is split into the L3 header, L4 header and
2372 * L4 payload in three buffer mode; in five buffer mode the L4 payload itself
2373 * is split into 3 fragments. As of now single buffer and two buffer
2374 * (RXD_MODE_3B) modes are supported.
2375 * Return Value:
2376 * SUCCESS on success or an appropriate -ve value on failure.
2377 */
2378
2379static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2380{
2381 struct net_device *dev = nic->dev;
2382 struct sk_buff *skb;
2383 struct RxD_t *rxdp;
2384 int off, off1, size, block_no, block_no1;
2385 u32 alloc_tab = 0;
2386 u32 alloc_cnt;
2387 struct mac_info *mac_control;
2388 struct config_param *config;
2389 u64 tmp;
2390 struct buffAdd *ba;
2391 unsigned long flags;
2392 struct RxD_t *first_rxdp = NULL;
2393 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2394 struct RxD1 *rxdp1;
2395 struct RxD3 *rxdp3;
2396 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2397
2398 mac_control = &nic->mac_control;
2399 config = &nic->config;
2400 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2401 atomic_read(&nic->rx_bufs_left[ring_no]);
2402
2403 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2404 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2405 while (alloc_tab < alloc_cnt) {
2406 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2407 block_index;
2408 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2409
2410 rxdp = mac_control->rings[ring_no].
2411 rx_blocks[block_no].rxds[off].virt_addr;
2412
2413 if ((block_no == block_no1) && (off == off1) &&
2414 (rxdp->Host_Control)) {
2415 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2416 dev->name);
2417 DBG_PRINT(INTR_DBG, " info equated\n");
2418 goto end;
2419 }
2420 if (off && (off == rxd_count[nic->rxd_mode])) {
2421 mac_control->rings[ring_no].rx_curr_put_info.
2422 block_index++;
2423 if (mac_control->rings[ring_no].rx_curr_put_info.
2424 block_index == mac_control->rings[ring_no].
2425 block_count)
2426 mac_control->rings[ring_no].rx_curr_put_info.
2427 block_index = 0;
2428 block_no = mac_control->rings[ring_no].
2429 rx_curr_put_info.block_index;
2430 if (off == rxd_count[nic->rxd_mode])
2431 off = 0;
2432 mac_control->rings[ring_no].rx_curr_put_info.
2433 offset = off;
2434 rxdp = mac_control->rings[ring_no].
2435 rx_blocks[block_no].block_virt_addr;
2436 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2437 dev->name, rxdp);
2438 }
2439 if(!napi) {
2440 spin_lock_irqsave(&nic->put_lock, flags);
2441 mac_control->rings[ring_no].put_pos =
2442 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2443 spin_unlock_irqrestore(&nic->put_lock, flags);
2444 } else {
2445 mac_control->rings[ring_no].put_pos =
2446 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2447 }
2448 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2449 ((nic->rxd_mode == RXD_MODE_3B) &&
2450 (rxdp->Control_2 & BIT(0)))) {
2451 mac_control->rings[ring_no].rx_curr_put_info.
2452 offset = off;
2453 goto end;
2454 }
2455 /* calculate size of skb based on ring mode */
2456 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2457 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2458 if (nic->rxd_mode == RXD_MODE_1)
2459 size += NET_IP_ALIGN;
2460 else
2461 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2462
2463 /* allocate skb */
2464 skb = dev_alloc_skb(size);
2465 if(!skb) {
2466 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2467 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2468 if (first_rxdp) {
2469 wmb();
2470 first_rxdp->Control_1 |= RXD_OWN_XENA;
2471 }
2472 nic->mac_control.stats_info->sw_stat.
2473 mem_alloc_fail_cnt++;
2474 return -ENOMEM ;
2475 }
2476 nic->mac_control.stats_info->sw_stat.mem_allocated
2477 += skb->truesize;
2478 if (nic->rxd_mode == RXD_MODE_1) {
2479 /* 1 buffer mode - normal operation mode */
2480 rxdp1 = (struct RxD1*)rxdp;
2481 memset(rxdp, 0, sizeof(struct RxD1));
2482 skb_reserve(skb, NET_IP_ALIGN);
2483 rxdp1->Buffer0_ptr = pci_map_single
2484 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2485 PCI_DMA_FROMDEVICE);
2486 if( (rxdp1->Buffer0_ptr == 0) ||
2487 (rxdp1->Buffer0_ptr ==
2488 DMA_ERROR_CODE))
2489 goto pci_map_failed;
2490
2491 rxdp->Control_2 =
2492 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2493
2494 } else if (nic->rxd_mode == RXD_MODE_3B) {
2495 /*
2496 * 2 buffer mode -
2497 * this mode provides 128
2498 * byte aligned receive buffers.
2499 */
2500
2501 rxdp3 = (struct RxD3*)rxdp;
2502 /* save buffer pointers to avoid frequent dma mapping */
2503 Buffer0_ptr = rxdp3->Buffer0_ptr;
2504 Buffer1_ptr = rxdp3->Buffer1_ptr;
2505 memset(rxdp, 0, sizeof(struct RxD3));
2506 /* restore the buffer pointers for dma sync*/
2507 rxdp3->Buffer0_ptr = Buffer0_ptr;
2508 rxdp3->Buffer1_ptr = Buffer1_ptr;
2509
2510 ba = &mac_control->rings[ring_no].ba[block_no][off];
2511 skb_reserve(skb, BUF0_LEN);
2512 tmp = (u64)(unsigned long) skb->data;
2513 tmp += ALIGN_SIZE;
2514 tmp &= ~ALIGN_SIZE;
2515 skb->data = (void *) (unsigned long)tmp;
2516 skb_reset_tail_pointer(skb);
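 /*
 * Alignment sketch (assuming ALIGN_SIZE is of the form
 * 2^n - 1, e.g. 127 as defined in s2io.h): the statements
 * above round skb->data up to the next (ALIGN_SIZE + 1)-byte
 * boundary. For tmp == 0x1005: 0x1005 + 127 == 0x1084, and
 * 0x1084 & ~127 == 0x1080, a 128-byte aligned address.
 */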
2517
2518 if (!(rxdp3->Buffer0_ptr))
2519 rxdp3->Buffer0_ptr =
2520 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2521 PCI_DMA_FROMDEVICE);
2522 else
2523 pci_dma_sync_single_for_device(nic->pdev,
2524 (dma_addr_t) rxdp3->Buffer0_ptr,
2525 BUF0_LEN, PCI_DMA_FROMDEVICE);
2526 if( (rxdp3->Buffer0_ptr == 0) ||
2527 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2528 goto pci_map_failed;
2529
2530 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2531 if (nic->rxd_mode == RXD_MODE_3B) {
2532 /* Two buffer mode */
2533
2534 /*
2535 * Buffer2 will have L3/L4 header plus
2536 * L4 payload
2537 */
2538 rxdp3->Buffer2_ptr = pci_map_single
2539 (nic->pdev, skb->data, dev->mtu + 4,
2540 PCI_DMA_FROMDEVICE);
2541
2542 if( (rxdp3->Buffer2_ptr == 0) ||
2543 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2544 goto pci_map_failed;
2545
2546 rxdp3->Buffer1_ptr =
2547 pci_map_single(nic->pdev,
2548 ba->ba_1, BUF1_LEN,
2549 PCI_DMA_FROMDEVICE);
2550 if( (rxdp3->Buffer1_ptr == 0) ||
2551 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2552 pci_unmap_single
2553 (nic->pdev,
2554 (dma_addr_t)rxdp3->Buffer2_ptr,
2555 dev->mtu + 4,
2556 PCI_DMA_FROMDEVICE);
2557 goto pci_map_failed;
2558 }
2559 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2560 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2561 (dev->mtu + 4);
2562 }
2563 rxdp->Control_2 |= BIT(0);
2564 }
2565 rxdp->Host_Control = (unsigned long) (skb);
2566 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2567 rxdp->Control_1 |= RXD_OWN_XENA;
2568 off++;
2569 if (off == (rxd_count[nic->rxd_mode] + 1))
2570 off = 0;
2571 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2572
2573 rxdp->Control_2 |= SET_RXD_MARKER;
2574 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2575 if (first_rxdp) {
2576 wmb();
2577 first_rxdp->Control_1 |= RXD_OWN_XENA;
2578 }
2579 first_rxdp = rxdp;
2580 }
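 /*
 * Batching sketch: descriptors are handed to the NIC in
 * groups of 2^rxsync_frequency. Within a group, RXD_OWN_XENA
 * is set immediately; for the first descriptor of each group
 * it is deferred via first_rxdp and only set after a wmb(),
 * so the NIC never sees an owned descriptor whose other
 * fields are still in flight.
 */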
2581 atomic_inc(&nic->rx_bufs_left[ring_no]);
2582 alloc_tab++;
2583 }
2584
2585 end:
2586 /* Transfer ownership of first descriptor to adapter just before
2587 * exiting. Before that, use memory barrier so that ownership
2588 * and other fields are seen by adapter correctly.
2589 */
2590 if (first_rxdp) {
2591 wmb();
2592 first_rxdp->Control_1 |= RXD_OWN_XENA;
2593 }
2594
2595 return SUCCESS;
2596pci_map_failed:
2597 stats->pci_map_fail_cnt++;
2598 stats->mem_freed += skb->truesize;
2599 dev_kfree_skb_irq(skb);
2600 return -ENOMEM;
2601}
2602
2603static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2604{
2605 struct net_device *dev = sp->dev;
2606 int j;
2607 struct sk_buff *skb;
2608 struct RxD_t *rxdp;
2609 struct mac_info *mac_control;
2610 struct buffAdd *ba;
2611 struct RxD1 *rxdp1;
2612 struct RxD3 *rxdp3;
2613
2614 mac_control = &sp->mac_control;
2615 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2616 rxdp = mac_control->rings[ring_no].
2617 rx_blocks[blk].rxds[j].virt_addr;
2618 skb = (struct sk_buff *)
2619 ((unsigned long) rxdp->Host_Control);
2620 if (!skb) {
2621 continue;
2622 }
2623 if (sp->rxd_mode == RXD_MODE_1) {
2624 rxdp1 = (struct RxD1*)rxdp;
2625 pci_unmap_single(sp->pdev, (dma_addr_t)
2626 rxdp1->Buffer0_ptr,
2627 dev->mtu +
2628 HEADER_ETHERNET_II_802_3_SIZE
2629 + HEADER_802_2_SIZE +
2630 HEADER_SNAP_SIZE,
2631 PCI_DMA_FROMDEVICE);
2632 memset(rxdp, 0, sizeof(struct RxD1));
2633 } else if(sp->rxd_mode == RXD_MODE_3B) {
2634 rxdp3 = (struct RxD3*)rxdp;
2635 ba = &mac_control->rings[ring_no].
2636 ba[blk][j];
2637 pci_unmap_single(sp->pdev, (dma_addr_t)
2638 rxdp3->Buffer0_ptr,
2639 BUF0_LEN,
2640 PCI_DMA_FROMDEVICE);
2641 pci_unmap_single(sp->pdev, (dma_addr_t)
2642 rxdp3->Buffer1_ptr,
2643 BUF1_LEN,
2644 PCI_DMA_FROMDEVICE);
2645 pci_unmap_single(sp->pdev, (dma_addr_t)
2646 rxdp3->Buffer2_ptr,
2647 dev->mtu + 4,
2648 PCI_DMA_FROMDEVICE);
2649 memset(rxdp, 0, sizeof(struct RxD3));
2650 }
2651 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2652 dev_kfree_skb(skb);
2653 atomic_dec(&sp->rx_bufs_left[ring_no]);
2654 }
2655}
2656
2657/**
2658 * free_rx_buffers - Frees all Rx buffers
2659 * @sp: device private variable.
2660 * Description:
2661 * This function will free all Rx buffers allocated by host.
2662 * Return Value:
2663 * NONE.
2664 */
2665
2666static void free_rx_buffers(struct s2io_nic *sp)
2667{
2668 struct net_device *dev = sp->dev;
2669 int i, blk = 0, buf_cnt = 0;
2670 struct mac_info *mac_control;
2671 struct config_param *config;
2672
2673 mac_control = &sp->mac_control;
2674 config = &sp->config;
2675
2676 for (i = 0; i < config->rx_ring_num; i++) {
2677 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2678 free_rxd_blk(sp,i,blk);
2679
2680 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2681 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2682 mac_control->rings[i].rx_curr_put_info.offset = 0;
2683 mac_control->rings[i].rx_curr_get_info.offset = 0;
2684 atomic_set(&sp->rx_bufs_left[i], 0);
2685 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2686 dev->name, buf_cnt, i);
2687 }
2688}
2689
2690/**
2691 * s2io_poll - Rx interrupt handler for NAPI support
2692 * @napi : pointer to the napi structure.
2693 * @budget : The number of packets that were budgeted to be processed
2694 * during one pass through the 'Poll' function.
2695 * Description:
2696 * Comes into the picture only if NAPI support has been incorporated. It
2697 * does the same thing that rx_intr_handler does, but not in an interrupt
2698 * context, and it will process only a given number of packets.
2699 * Return value:
2700 * The number of packets processed during this pass.
2701 */
2702
2703static int s2io_poll(struct napi_struct *napi, int budget)
2704{
2705 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2706 struct net_device *dev = nic->dev;
2707 int pkt_cnt = 0, org_pkts_to_process;
2708 struct mac_info *mac_control;
2709 struct config_param *config;
2710 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2711 int i;
2712
2713 atomic_inc(&nic->isr_cnt);
2714 mac_control = &nic->mac_control;
2715 config = &nic->config;
2716
2717 nic->pkts_to_process = budget;
2718 org_pkts_to_process = nic->pkts_to_process;
2719
2720 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2721 readl(&bar0->rx_traffic_int);
2722
2723 for (i = 0; i < config->rx_ring_num; i++) {
2724 rx_intr_handler(&mac_control->rings[i]);
2725 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2726 if (!nic->pkts_to_process) {
2727 /* Quota for the current iteration has been met */
2728 goto no_rx;
2729 }
2730 }
2731
2732 netif_rx_complete(dev, napi);
2733
2734 for (i = 0; i < config->rx_ring_num; i++) {
2735 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2736 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2737 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2738 break;
2739 }
2740 }
2741 /* Re enable the Rx interrupts. */
2742 writeq(0x0, &bar0->rx_traffic_mask);
2743 readl(&bar0->rx_traffic_mask);
2744 atomic_dec(&nic->isr_cnt);
2745 return pkt_cnt;
2746
2747no_rx:
2748 for (i = 0; i < config->rx_ring_num; i++) {
2749 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2750 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2751 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2752 break;
2753 }
2754 }
2755 atomic_dec(&nic->isr_cnt);
2756 return pkt_cnt;
2757}
2758
2759#ifdef CONFIG_NET_POLL_CONTROLLER
2760/**
2761 * s2io_netpoll - netpoll event handler entry point
2762 * @dev : pointer to the device structure.
2763 * Description:
2764 * This function will be called by upper layer to check for events on the
2765 * interface in situations where interrupts are disabled. It is used for
2766 * specific in-kernel networking tasks, such as remote consoles and kernel
2767 * debugging over the network (example netdump in RedHat).
2768 */
2769static void s2io_netpoll(struct net_device *dev)
2770{
2771 struct s2io_nic *nic = dev->priv;
2772 struct mac_info *mac_control;
2773 struct config_param *config;
2774 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2775 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2776 int i;
2777
2778 if (pci_channel_offline(nic->pdev))
2779 return;
2780
2781 disable_irq(dev->irq);
2782
2783 atomic_inc(&nic->isr_cnt);
2784 mac_control = &nic->mac_control;
2785 config = &nic->config;
2786
2787 writeq(val64, &bar0->rx_traffic_int);
2788 writeq(val64, &bar0->tx_traffic_int);
2789
2790 /* We need to free up the transmitted skbs, or else netpoll will
2791 * run out of skbs and fail, and eventually a netpoll application
2792 * such as netdump will fail too.
2793 */
2794 for (i = 0; i < config->tx_fifo_num; i++)
2795 tx_intr_handler(&mac_control->fifos[i]);
2796
2797 /* check for received packet and indicate up to network */
2798 for (i = 0; i < config->rx_ring_num; i++)
2799 rx_intr_handler(&mac_control->rings[i]);
2800
2801 for (i = 0; i < config->rx_ring_num; i++) {
2802 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2803 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2804 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2805 break;
2806 }
2807 }
2808 atomic_dec(&nic->isr_cnt);
2809 enable_irq(dev->irq);
2810 return;
2811}
2812#endif
2813
2814/**
2815 * rx_intr_handler - Rx interrupt handler
2816 * @nic: device private variable.
2817 * Description:
2818 * If the interrupt is because of a received frame or if the
2819 * receive ring contains fresh, as yet unprocessed frames, this function is
2820 * called. It picks out the RxD at which the last Rx processing had
2821 * stopped and sends the skb to the OSM's Rx handler and then increments
2822 * the offset.
2823 * Return Value:
2824 * NONE.
2825 */
2826static void rx_intr_handler(struct ring_info *ring_data)
2827{
2828 struct s2io_nic *nic = ring_data->nic;
2829 struct net_device *dev = (struct net_device *) nic->dev;
2830 int get_block, put_block, put_offset;
2831 struct rx_curr_get_info get_info, put_info;
2832 struct RxD_t *rxdp;
2833 struct sk_buff *skb;
2834 int pkt_cnt = 0;
2835 int i;
2836 struct RxD1* rxdp1;
2837 struct RxD3* rxdp3;
2838
2839 spin_lock(&nic->rx_lock);
2840 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2841 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2842 __FUNCTION__, dev->name);
2843 spin_unlock(&nic->rx_lock);
2844 return;
2845 }
2846
2847 get_info = ring_data->rx_curr_get_info;
2848 get_block = get_info.block_index;
2849 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2850 put_block = put_info.block_index;
2851 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2852 if (!napi) {
2853 spin_lock(&nic->put_lock);
2854 put_offset = ring_data->put_pos;
2855 spin_unlock(&nic->put_lock);
2856 } else
2857 put_offset = ring_data->put_pos;
2858
2859 while (RXD_IS_UP2DT(rxdp)) {
2860 /*
2861 * If we are next to the put index then it's a
2862 * ring full condition
2863 */
2864 if ((get_block == put_block) &&
2865 (get_info.offset + 1) == put_info.offset) {
2866 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2867 break;
2868 }
2869 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2870 if (skb == NULL) {
2871 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2872 dev->name);
2873 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2874 spin_unlock(&nic->rx_lock);
2875 return;
2876 }
2877 if (nic->rxd_mode == RXD_MODE_1) {
2878 rxdp1 = (struct RxD1*)rxdp;
2879 pci_unmap_single(nic->pdev, (dma_addr_t)
2880 rxdp1->Buffer0_ptr,
2881 dev->mtu +
2882 HEADER_ETHERNET_II_802_3_SIZE +
2883 HEADER_802_2_SIZE +
2884 HEADER_SNAP_SIZE,
2885 PCI_DMA_FROMDEVICE);
2886 } else if (nic->rxd_mode == RXD_MODE_3B) {
2887 rxdp3 = (struct RxD3*)rxdp;
2888 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2889 rxdp3->Buffer0_ptr,
2890 BUF0_LEN, PCI_DMA_FROMDEVICE);
2891 pci_unmap_single(nic->pdev, (dma_addr_t)
2892 rxdp3->Buffer2_ptr,
2893 dev->mtu + 4,
2894 PCI_DMA_FROMDEVICE);
2895 }
2896 prefetch(skb->data);
2897 rx_osm_handler(ring_data, rxdp);
2898 get_info.offset++;
2899 ring_data->rx_curr_get_info.offset = get_info.offset;
2900 rxdp = ring_data->rx_blocks[get_block].
2901 rxds[get_info.offset].virt_addr;
2902 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2903 get_info.offset = 0;
2904 ring_data->rx_curr_get_info.offset = get_info.offset;
2905 get_block++;
2906 if (get_block == ring_data->block_count)
2907 get_block = 0;
2908 ring_data->rx_curr_get_info.block_index = get_block;
2909 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2910 }
2911
2912 nic->pkts_to_process -= 1;
2913 if ((napi) && (!nic->pkts_to_process))
2914 break;
2915 pkt_cnt++;
2916 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2917 break;
2918 }
2919 if (nic->lro) {
2920 /* Clear all LRO sessions before exiting */
2921 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2922 struct lro *lro = &nic->lro0_n[i];
2923 if (lro->in_use) {
2924 update_L3L4_header(nic, lro);
2925 queue_rx_frame(lro->parent);
2926 clear_lro_session(lro);
2927 }
2928 }
2929 }
2930
2931 spin_unlock(&nic->rx_lock);
2932}
2933
2934/**
2935 * tx_intr_handler - Transmit interrupt handler
2936 * @nic : device private variable
2937 * Description:
2938 * If an interrupt was raised to indicate DMA complete of the
2939 * Tx packet, this function is called. It identifies the last TxD
2940 * whose buffer was freed and frees all skbs whose data have already
2941 * been DMA'ed into the NIC's internal memory.
2942 * Return Value:
2943 * NONE
2944 */
2945
2946static void tx_intr_handler(struct fifo_info *fifo_data)
2947{
2948 struct s2io_nic *nic = fifo_data->nic;
2949 struct net_device *dev = (struct net_device *) nic->dev;
2950 struct tx_curr_get_info get_info, put_info;
2951 struct sk_buff *skb;
2952 struct TxD *txdlp;
2953 u8 err_mask;
2954
2955 get_info = fifo_data->tx_curr_get_info;
2956 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2957 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2958 list_virt_addr;
2959 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2960 (get_info.offset != put_info.offset) &&
2961 (txdlp->Host_Control)) {
2962 /* Check for TxD errors */
2963 if (txdlp->Control_1 & TXD_T_CODE) {
2964 unsigned long long err;
2965 err = txdlp->Control_1 & TXD_T_CODE;
2966 if (err & 0x1) {
2967 nic->mac_control.stats_info->sw_stat.
2968 parity_err_cnt++;
2969 }
2970
2971 /* update t_code statistics */
2972 err_mask = err >> 48;
2973 switch(err_mask) {
2974 case 2:
2975 nic->mac_control.stats_info->sw_stat.
2976 tx_buf_abort_cnt++;
2977 break;
2978
2979 case 3:
2980 nic->mac_control.stats_info->sw_stat.
2981 tx_desc_abort_cnt++;
2982 break;
2983
2984 case 7:
2985 nic->mac_control.stats_info->sw_stat.
2986 tx_parity_err_cnt++;
2987 break;
2988
2989 case 10:
2990 nic->mac_control.stats_info->sw_stat.
2991 tx_link_loss_cnt++;
2992 break;
2993
2994 case 15:
2995 nic->mac_control.stats_info->sw_stat.
2996 tx_list_proc_err_cnt++;
2997 break;
2998 }
2999 }
3000
3001 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3002 if (skb == NULL) {
3003 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3004 __FUNCTION__);
3005 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3006 return;
3007 }
3008
3009 /* Updating the statistics block */
3010 nic->stats.tx_bytes += skb->len;
3011 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3012 dev_kfree_skb_irq(skb);
3013
3014 get_info.offset++;
3015 if (get_info.offset == get_info.fifo_len + 1)
3016 get_info.offset = 0;
3017 txdlp = (struct TxD *) fifo_data->list_info
3018 [get_info.offset].list_virt_addr;
3019 fifo_data->tx_curr_get_info.offset =
3020 get_info.offset;
3021 }
3022
3023 spin_lock(&nic->tx_lock);
3024 if (netif_queue_stopped(dev))
3025 netif_wake_queue(dev);
3026 spin_unlock(&nic->tx_lock);
3027}
3028
3029/**
3030 * s2io_mdio_write - Function to write in to MDIO registers
3031 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3032 * @addr : address value
3033 * @value : data value
3034 * @dev : pointer to net_device structure
3035 * Description:
3036 * This function is used to write values to the MDIO registers.
3037 * Return Value: NONE
3038 */
3039static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3040{
3041 u64 val64 = 0x0;
3042 struct s2io_nic *sp = dev->priv;
3043 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3044
3045 /* address transaction */
3046 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3047 | MDIO_MMD_DEV_ADDR(mmd_type)
3048 | MDIO_MMS_PRT_ADDR(0x0);
3049 writeq(val64, &bar0->mdio_control);
3050 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3051 writeq(val64, &bar0->mdio_control);
3052 udelay(100);
3053
3054 /* Data transaction */
3055 val64 = 0x0;
3056 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3057 | MDIO_MMD_DEV_ADDR(mmd_type)
3058 | MDIO_MMS_PRT_ADDR(0x0)
3059 | MDIO_MDIO_DATA(value)
3060 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3061 writeq(val64, &bar0->mdio_control);
3062 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3063 writeq(val64, &bar0->mdio_control);
3064 udelay(100);
3065
3066 val64 = 0x0;
3067 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3068 | MDIO_MMD_DEV_ADDR(mmd_type)
3069 | MDIO_MMS_PRT_ADDR(0x0)
3070 | MDIO_OP(MDIO_OP_READ_TRANS);
3071 writeq(val64, &bar0->mdio_control);
3072 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3073 writeq(val64, &bar0->mdio_control);
3074 udelay(100);
3075
3076}
3077
3078/**
3079 * s2io_mdio_read - Function to read from the MDIO registers
3080 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3081 * @addr : address value
3082 * @dev : pointer to net_device structure
3083 * Description:
3084 * This function is used to read values from the MDIO registers.
3085 * Return Value: the 16-bit value read from the addressed register.
3086 */
3087static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3088{
3089 u64 val64 = 0x0;
3090 u64 rval64 = 0x0;
3091 struct s2io_nic *sp = dev->priv;
3092 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3093
3094 /* address transaction */
3095 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3096 | MDIO_MMD_DEV_ADDR(mmd_type)
3097 | MDIO_MMS_PRT_ADDR(0x0);
3098 writeq(val64, &bar0->mdio_control);
3099 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3100 writeq(val64, &bar0->mdio_control);
3101 udelay(100);
3102
3103 /* Data transaction */
3104 val64 = 0x0;
3105 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3106 | MDIO_MMD_DEV_ADDR(mmd_type)
3107 | MDIO_MMS_PRT_ADDR(0x0)
3108 | MDIO_OP(MDIO_OP_READ_TRANS);
3109 writeq(val64, &bar0->mdio_control);
3110 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3111 writeq(val64, &bar0->mdio_control);
3112 udelay(100);
3113
3114 /* Read the value from regs */
3115 rval64 = readq(&bar0->mdio_control);
3116 rval64 = rval64 & 0xFFFF0000;
3117 rval64 = rval64 >> 16;
3118 return rval64;
3119}
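/*
 * Read-path sketch (illustrative value): the 16 data bits live in bits
 * 16..31 of mdio_control, so masking with 0xFFFF0000 and shifting right
 * by 16 extracts them. If mdio_control reads back 0x0000000020400000,
 * the function returns 0x2040, the PMA device ID value that
 * s2io_updt_xpak_counter() checks for below.
 */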
3120/**
3121 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3122 * @counter : counter value to be updated
3123 * @regs_stat : alarm status word; @index : index of its 2-bit field
3124 * @flag : flag to indicate the status; @type : counter type
3125 * Description:
3126 * This function checks the status of the xpak counter values.
3127 * Return Value: NONE
3128 */
3129
3130static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3131{
3132 u64 mask = 0x3;
3133 u64 val64;
3134 int i;
3135 for (i = 0; i < index; i++)
3136 mask = mask << 0x2;
3137
3138 if(flag > 0)
3139 {
3140 *counter = *counter + 1;
3141 val64 = *regs_stat & mask;
3142 val64 = val64 >> (index * 0x2);
3143 val64 = val64 + 1;
3144 if(val64 == 3)
3145 {
3146 switch(type)
3147 {
3148 case 1:
3149 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3150 "service. Excessive temperatures may "
3151 "result in premature transceiver "
3152 "failure \n");
3153 break;
3154 case 2:
3155 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3156 "service Excessive bias currents may "
3157 "indicate imminent laser diode "
3158 "failure \n");
3159 break;
3160 case 3:
3161 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3162 "service Excessive laser output "
3163 "power may saturate far-end "
3164 "receiver\n");
3165 break;
3166 default:
3167 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3168 "type \n");
3169 }
3170 val64 = 0x0;
3171 }
3172 val64 = val64 << (index * 0x2);
3173 *regs_stat = (*regs_stat & (~mask)) | (val64);
3174
3175 } else {
3176 *regs_stat = *regs_stat & (~mask);
3177 }
3178}
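/*
 * Worked example: each alarm source owns a 2-bit saturating counter
 * inside *regs_stat at bit offset index * 2 (the shift loop builds
 * mask == 0x3 << (index * 2)). Three consecutive samples with the flag
 * raised take the field 0 -> 1 -> 2 -> 3; on reaching 3 the relevant
 * warning is printed and the field is reset to 0. A sample with the
 * flag clear resets the field immediately.
 */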
3179
3180/**
3181 * s2io_updt_xpak_counter - Function to update the xpak counters
3182 * @dev : pointer to net_device struct
3183 * Description:
3184 * This function updates the status of the xpak counter values.
3185 * Return Value: NONE
3186 */
3187static void s2io_updt_xpak_counter(struct net_device *dev)
3188{
3189 u16 flag = 0x0;
3190 u16 type = 0x0;
3191 u16 val16 = 0x0;
3192 u64 val64 = 0x0;
3193 u64 addr = 0x0;
3194
3195 struct s2io_nic *sp = dev->priv;
3196 struct stat_block *stat_info = sp->mac_control.stats_info;
3197
3198 /* Check the communication with the MDIO slave */
3199 addr = 0x0000;
3200 val64 = 0x0;
3201 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3202 if((val64 == 0xFFFF) || (val64 == 0x0000))
3203 {
3204 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3205 "Returned %llx\n", (unsigned long long)val64);
3206 return;
3207 }
3208
3209 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3210 if(val64 != 0x2040)
3211 {
3212 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3213 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3214 (unsigned long long)val64);
3215 return;
3216 }
3217
3218 /* Loading the DOM register to MDIO register */
3219 addr = 0xA100;
3220 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3221 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3222
3223 /* Reading the Alarm flags */
3224 addr = 0xA070;
3225 val64 = 0x0;
3226 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3227
3228 flag = CHECKBIT(val64, 0x7);
3229 type = 1;
3230 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3231 &stat_info->xpak_stat.xpak_regs_stat,
3232 0x0, flag, type);
3233
3234 if(CHECKBIT(val64, 0x6))
3235 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3236
3237 flag = CHECKBIT(val64, 0x3);
3238 type = 2;
3239 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3240 &stat_info->xpak_stat.xpak_regs_stat,
3241 0x2, flag, type);
3242
3243 if(CHECKBIT(val64, 0x2))
3244 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3245
3246 flag = CHECKBIT(val64, 0x1);
3247 type = 3;
3248 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3249 &stat_info->xpak_stat.xpak_regs_stat,
3250 0x4, flag, type);
3251
3252 if(CHECKBIT(val64, 0x0))
3253 stat_info->xpak_stat.alarm_laser_output_power_low++;
3254
3255 /* Reading the Warning flags */
3256 addr = 0xA074;
3257 val64 = 0x0;
3258 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3259
3260 if(CHECKBIT(val64, 0x7))
3261 stat_info->xpak_stat.warn_transceiver_temp_high++;
3262
3263 if(CHECKBIT(val64, 0x6))
3264 stat_info->xpak_stat.warn_transceiver_temp_low++;
3265
3266 if(CHECKBIT(val64, 0x3))
3267 stat_info->xpak_stat.warn_laser_bias_current_high++;
3268
3269 if(CHECKBIT(val64, 0x2))
3270 stat_info->xpak_stat.warn_laser_bias_current_low++;
3271
3272 if(CHECKBIT(val64, 0x1))
3273 stat_info->xpak_stat.warn_laser_output_power_high++;
3274
3275 if(CHECKBIT(val64, 0x0))
3276 stat_info->xpak_stat.warn_laser_output_power_low++;
3277}
3278
3279/**
3280 * wait_for_cmd_complete - waits for a command to complete.
3281 * @sp : private member of the device structure, which is a pointer to the
3282 * s2io_nic structure.
3283 * Description: Function that waits for a command to Write into RMAC
3284 * ADDR DATA registers to be completed and returns either success or
3285 * error depending on whether the command was complete or not.
3286 * Return value:
3287 * SUCCESS on success and FAILURE on failure.
3288 */
3289
3290static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3291 int bit_state)
3292{
3293 int ret = FAILURE, cnt = 0, delay = 1;
3294 u64 val64;
3295
3296 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3297 return FAILURE;
3298
3299 do {
3300 val64 = readq(addr);
3301 if (bit_state == S2IO_BIT_RESET) {
3302 if (!(val64 & busy_bit)) {
3303 ret = SUCCESS;
3304 break;
3305 }
3306 } else {
3307 if (val64 & busy_bit) { /* S2IO_BIT_SET: wait for set */
3308 ret = SUCCESS;
3309 break;
3310 }
3311 }
3312
3313 if(in_interrupt())
3314 mdelay(delay);
3315 else
3316 msleep(delay);
3317
3318 if (++cnt >= 10)
3319 delay = 50;
3320 } while (cnt < 20);
3321 return ret;
3322}
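/*
 * Timing sketch: delay starts at 1 ms and is bumped to 50 ms once cnt
 * reaches 10, and the loop runs at most 20 iterations, so the worst
 * case wait is roughly 10 * 1 + 10 * 50 = 510 ms before FAILURE is
 * returned.
 */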
3323/*
3324 * check_pci_device_id - Checks if the device id is supported
3325 * @id : device id
3326 * Description: Function to check if the pci device id is supported by driver.
3327 * Return value: Actual device id if supported else PCI_ANY_ID
3328 */
3329static u16 check_pci_device_id(u16 id)
3330{
3331 switch (id) {
3332 case PCI_DEVICE_ID_HERC_WIN:
3333 case PCI_DEVICE_ID_HERC_UNI:
3334 return XFRAME_II_DEVICE;
3335 case PCI_DEVICE_ID_S2IO_UNI:
3336 case PCI_DEVICE_ID_S2IO_WIN:
3337 return XFRAME_I_DEVICE;
3338 default:
3339 return PCI_ANY_ID;
3340 }
3341}
3342
3343/**
3344 * s2io_reset - Resets the card.
3345 * @sp : private member of the device structure.
3346 * Description: Function to Reset the card. This function then also
3347 * restores the previously saved PCI configuration space registers as
3348 * the card reset also resets the configuration space.
3349 * Return value:
3350 * void.
3351 */
3352
3353static void s2io_reset(struct s2io_nic * sp)
3354{
3355 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3356 u64 val64;
3357 u16 subid, pci_cmd;
3358 int i;
3359 u16 val16;
3360 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3361 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3362
3363 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3364 __FUNCTION__, sp->dev->name);
3365
3366 /* Back up the PCI-X CMD reg, we don't want to lose MMRBC, OST settings */
3367 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3368
3369 val64 = SW_RESET_ALL;
3370 writeq(val64, &bar0->sw_reset);
3371 if (strstr(sp->product_name, "CX4")) {
3372 msleep(750);
3373 }
3374 msleep(250);
3375 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3376
3377 /* Restore the PCI state saved during initialization. */
3378 pci_restore_state(sp->pdev);
3379 pci_read_config_word(sp->pdev, 0x2, &val16);
3380 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3381 break;
3382 msleep(200);
3383 }
3384
3385 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3386 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3387 }
3388
3389 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3390
3391 s2io_init_pci(sp);
3392
3393 /* Set swapper to enable I/O register access */
3394 s2io_set_swapper(sp);
3395
3396 /* Restore the MSIX table entries from local variables */
3397 restore_xmsi_data(sp);
3398
3399 /* Clear certain PCI/PCI-X fields after reset */
3400 if (sp->device_type == XFRAME_II_DEVICE) {
3401 /* Clear "detected parity error" bit */
3402 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3403
3404 /* Clearing PCIX Ecc status register */
3405 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3406
3407 /* Clearing PCI_STATUS error reflected here */
3408 writeq(BIT(62), &bar0->txpic_int_reg);
3409 }
3410
3411 /* Reset device statistics maintained by OS */
3412 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3413
3414 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3415 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3416 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3417 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3418 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3419 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3420 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3421 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3422 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3423 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3424 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3425 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3426 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3427 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3428 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3429 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3430 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3431 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3432 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3433
3434 /* SXE-002: Configure link and activity LED to turn it off */
3435 subid = sp->pdev->subsystem_device;
3436 if (((subid & 0xFF) >= 0x07) &&
3437 (sp->device_type == XFRAME_I_DEVICE)) {
3438 val64 = readq(&bar0->gpio_control);
3439 val64 |= 0x0000800000000000ULL;
3440 writeq(val64, &bar0->gpio_control);
3441 val64 = 0x0411040400000000ULL;
3442 writeq(val64, (void __iomem *)bar0 + 0x2700);
3443 }
3444
3445 /*
3446 * Clear spurious ECC interrupts that would have occurred on
3447 * XFRAME II cards after reset.
3448 */
3449 if (sp->device_type == XFRAME_II_DEVICE) {
3450 val64 = readq(&bar0->pcc_err_reg);
3451 writeq(val64, &bar0->pcc_err_reg);
3452 }
3453
3454 /* restore the previously assigned mac address */
3455 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3456
3457 sp->device_enabled_once = FALSE;
3458}
3459
3460/**
3461 * s2io_set_swapper - to set the swapper control on the card
3462 * @sp : private member of the device structure,
3463 * pointer to the s2io_nic structure.
3464 * Description: Function to set the swapper control on the card
3465 * correctly depending on the 'endianness' of the system.
3466 * Return value:
3467 * SUCCESS on success and FAILURE on failure.
3468 */
3469
3470static int s2io_set_swapper(struct s2io_nic * sp)
3471{
3472 struct net_device *dev = sp->dev;
3473 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3474 u64 val64, valt, valr;
3475
3476 /*
3477 * Set proper endian settings and verify the same by reading
3478 * the PIF Feed-back register.
3479 */
3480
3481 val64 = readq(&bar0->pif_rd_swapper_fb);
3482 if (val64 != 0x0123456789ABCDEFULL) {
3483 int i = 0;
3484 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3485 0x8100008181000081ULL, /* FE=1, SE=0 */
3486 0x4200004242000042ULL, /* FE=0, SE=1 */
3487 0}; /* FE=0, SE=0 */
3488
3489 while(i<4) {
3490 writeq(value[i], &bar0->swapper_ctrl);
3491 val64 = readq(&bar0->pif_rd_swapper_fb);
3492 if (val64 == 0x0123456789ABCDEFULL)
3493 break;
3494 i++;
3495 }
3496 if (i == 4) {
3497 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3498 dev->name);
3499 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3500 (unsigned long long) val64);
3501 return FAILURE;
3502 }
3503 valr = value[i];
3504 } else {
3505 valr = readq(&bar0->swapper_ctrl);
3506 }
3507
3508 valt = 0x0123456789ABCDEFULL;
3509 writeq(valt, &bar0->xmsi_address);
3510 val64 = readq(&bar0->xmsi_address);
3511
3512 if(val64 != valt) {
3513 int i = 0;
3514 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3515 0x0081810000818100ULL, /* FE=1, SE=0 */
3516 0x0042420000424200ULL, /* FE=0, SE=1 */
3517 0}; /* FE=0, SE=0 */
3518
3519 while(i<4) {
3520 writeq((value[i] | valr), &bar0->swapper_ctrl);
3521 writeq(valt, &bar0->xmsi_address);
3522 val64 = readq(&bar0->xmsi_address);
3523 if(val64 == valt)
3524 break;
3525 i++;
3526 }
3527 if(i == 4) {
3528 unsigned long long x = val64;
3529 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3530 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3531 return FAILURE;
3532 }
3533 }
3534 val64 = readq(&bar0->swapper_ctrl);
3535 val64 &= 0xFFFF000000000000ULL;
3536
3537#ifdef __BIG_ENDIAN
3538 /*
3539 * The device is set to a big endian format by default, so a
3540 * big endian driver need not set anything.
3541 */
3542 val64 |= (SWAPPER_CTRL_TXP_FE |
3543 SWAPPER_CTRL_TXP_SE |
3544 SWAPPER_CTRL_TXD_R_FE |
3545 SWAPPER_CTRL_TXD_W_FE |
3546 SWAPPER_CTRL_TXF_R_FE |
3547 SWAPPER_CTRL_RXD_R_FE |
3548 SWAPPER_CTRL_RXD_W_FE |
3549 SWAPPER_CTRL_RXF_W_FE |
3550 SWAPPER_CTRL_XMSI_FE |
3551 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3552 if (sp->intr_type == INTA)
3553 val64 |= SWAPPER_CTRL_XMSI_SE;
3554 writeq(val64, &bar0->swapper_ctrl);
3555#else
3556 /*
3557 * Initially we enable all bits to make it accessible by the
3558 * driver, then we selectively enable only those bits that
3559 * we want to set.
3560 */
3561 val64 |= (SWAPPER_CTRL_TXP_FE |
3562 SWAPPER_CTRL_TXP_SE |
3563 SWAPPER_CTRL_TXD_R_FE |
3564 SWAPPER_CTRL_TXD_R_SE |
3565 SWAPPER_CTRL_TXD_W_FE |
3566 SWAPPER_CTRL_TXD_W_SE |
3567 SWAPPER_CTRL_TXF_R_FE |
3568 SWAPPER_CTRL_RXD_R_FE |
3569 SWAPPER_CTRL_RXD_R_SE |
3570 SWAPPER_CTRL_RXD_W_FE |
3571 SWAPPER_CTRL_RXD_W_SE |
3572 SWAPPER_CTRL_RXF_W_FE |
3573 SWAPPER_CTRL_XMSI_FE |
3574 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3575 if (sp->intr_type == INTA)
3576 val64 |= SWAPPER_CTRL_XMSI_SE;
3577 writeq(val64, &bar0->swapper_ctrl);
3578#endif
3579 val64 = readq(&bar0->swapper_ctrl);
3580
3581 /*
3582 * Verifying if endian settings are accurate by reading a
3583 * feedback register.
3584 */
3585 val64 = readq(&bar0->pif_rd_swapper_fb);
3586 if (val64 != 0x0123456789ABCDEFULL) {
3587 /* Endian settings are incorrect, calls for another look. */
3588 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3589 dev->name);
3590 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3591 (unsigned long long) val64);
3592 return FAILURE;
3593 }
3594
3595 return SUCCESS;
3596}
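/*
 * Worked example: the PIF feedback register is expected to read
 * 0x0123456789ABCDEF once swapper control is correct. Under a pure
 * byte swap the same register would read back 0xEFCDAB8967452301,
 * which is why the probe loops above walk the FE/SE combinations until
 * the canonical pattern appears.
 */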
3597
3598static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3599{
3600 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3601 u64 val64;
3602 int ret = 0, cnt = 0;
3603
3604 do {
3605 val64 = readq(&bar0->xmsi_access);
3606 if (!(val64 & BIT(15)))
3607 break;
3608 mdelay(1);
3609 cnt++;
3610 } while(cnt < 5);
3611 if (cnt == 5) {
3612 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3613 ret = 1;
3614 }
3615
3616 return ret;
3617}
3618
3619static void restore_xmsi_data(struct s2io_nic *nic)
3620{
3621 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3622 u64 val64;
3623 int i;
3624
3625 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3626 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3627 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3628 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3629 writeq(val64, &bar0->xmsi_access);
3630 if (wait_for_msix_trans(nic, i)) {
3631 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3632 continue;
3633 }
3634 }
3635}
3636
3637static void store_xmsi_data(struct s2io_nic *nic)
3638{
3639 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3640 u64 val64, addr, data;
3641 int i;
3642
3643 /* Store and display */
3644 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3645 val64 = (BIT(15) | vBIT(i, 26, 6));
3646 writeq(val64, &bar0->xmsi_access);
3647 if (wait_for_msix_trans(nic, i)) {
3648 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3649 continue;
3650 }
3651 addr = readq(&bar0->xmsi_address);
3652 data = readq(&bar0->xmsi_data);
3653 if (addr && data) {
3654 nic->msix_info[i].addr = addr;
3655 nic->msix_info[i].data = data;
3656 }
3657 }
3658}
3659
3660static int s2io_enable_msi_x(struct s2io_nic *nic)
3661{
3662 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3663 u64 tx_mat, rx_mat;
3664 u16 msi_control; /* Temp variable */
3665 int ret, i, j, msix_indx = 1;
3666
3667 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3668 GFP_KERNEL);
3669 if (nic->entries == NULL) {
3670 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3671 __FUNCTION__);
3672 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3673 return -ENOMEM;
3674 }
3675 nic->mac_control.stats_info->sw_stat.mem_allocated
3676 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3677 memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3678
3679 nic->s2io_entries =
3680 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3681 GFP_KERNEL);
3682 if (nic->s2io_entries == NULL) {
3683 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3684 __FUNCTION__);
3685 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3686 kfree(nic->entries);
3687 nic->mac_control.stats_info->sw_stat.mem_freed
3688 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3689 return -ENOMEM;
3690 }
3691 nic->mac_control.stats_info->sw_stat.mem_allocated
3692 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3693 memset(nic->s2io_entries, 0,
3694 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3695
3696 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3697 nic->entries[i].entry = i;
3698 nic->s2io_entries[i].entry = i;
3699 nic->s2io_entries[i].arg = NULL;
3700 nic->s2io_entries[i].in_use = 0;
3701 }
3702
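 /*
  * msix_indx starts at 1, so vector 0 is not assigned to any FIFO or
  * ring here; vectors 1..tx_fifo_num steer Tx FIFO interrupts and the
  * following vectors steer Rx ring interrupts, as programmed into the
  * tx_mat/rx_mat steering registers below.
  */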
3703 tx_mat = readq(&bar0->tx_mat0_n[0]);
3704 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3705 tx_mat |= TX_MAT_SET(i, msix_indx);
3706 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3707 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3708 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3709 }
3710 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3711
3712 if (!nic->config.bimodal) {
3713 rx_mat = readq(&bar0->rx_mat);
3714 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3715 rx_mat |= RX_MAT_SET(j, msix_indx);
3716 nic->s2io_entries[msix_indx].arg
3717 = &nic->mac_control.rings[j];
3718 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3719 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3720 }
3721 writeq(rx_mat, &bar0->rx_mat);
3722 } else {
3723 tx_mat = readq(&bar0->tx_mat0_n[7]);
3724 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3725 tx_mat |= TX_MAT_SET(i, msix_indx);
3726 nic->s2io_entries[msix_indx].arg
3727 = &nic->mac_control.rings[j];
3728 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3729 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3730 }
3731 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3732 }
3733
3734 nic->avail_msix_vectors = 0;
3735 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3736 /* We fail init on error or if we get fewer vectors than the minimum required */
3737 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3738 nic->avail_msix_vectors = ret;
3739 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3740 }
3741 if (ret) {
3742 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3743 kfree(nic->entries);
3744 nic->mac_control.stats_info->sw_stat.mem_freed
3745 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3746 kfree(nic->s2io_entries);
3747 nic->mac_control.stats_info->sw_stat.mem_freed
3748 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3749 nic->entries = NULL;
3750 nic->s2io_entries = NULL;
3751 nic->avail_msix_vectors = 0;
3752 return -ENOMEM;
3753 }
3754 if (!nic->avail_msix_vectors)
3755 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3756
3757 /*
3758 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3759 * in the herc NIC. (Temp change, needs to be removed later)
3760 */
3761 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3762 msi_control |= 0x1; /* Enable MSI */
3763 pci_write_config_word(nic->pdev, 0x42, msi_control);
3764
3765 return 0;
3766}
3767
3768/* Handle software interrupt used during MSI(X) test */
3769static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id)
3770{
3771 struct s2io_nic *sp = dev_id;
3772
3773 sp->msi_detected = 1;
3774 wake_up(&sp->msi_wait);
3775
3776 return IRQ_HANDLED;
3777}
3778
3779/* Test interrupt path by forcing a software IRQ */
3780static int __devinit s2io_test_msi(struct s2io_nic *sp)
3781{
3782 struct pci_dev *pdev = sp->pdev;
3783 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3784 int err;
3785 u64 val64, saved64;
3786
3787 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3788 sp->name, sp);
3789 if (err) {
3790 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3791 sp->dev->name, pci_name(pdev), pdev->irq);
3792 return err;
3793 }
3794
3795 init_waitqueue_head (&sp->msi_wait);
3796 sp->msi_detected = 0;
3797
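 /*
  * Program the scheduled interrupt timer for a one-shot interrupt
  * routed to MSI-X vector 1; if s2io_test_intr() fires and sets
  * msi_detected, the MSI-X path is known to work.
  */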
3798 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3799 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3800 val64 |= SCHED_INT_CTRL_TIMER_EN;
3801 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3802 writeq(val64, &bar0->scheduled_int_ctrl);
3803
3804 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3805
3806 if (!sp->msi_detected) {
3807 /* MSI(X) test failed, go back to INTx mode */
3808 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3809 "using MSI(X) during test\n", sp->dev->name,
3810 pci_name(pdev));
3811
3812 err = -EOPNOTSUPP;
3813 }
3814
3815 free_irq(sp->entries[1].vector, sp);
3816
3817 writeq(saved64, &bar0->scheduled_int_ctrl);
3818
3819 return err;
3820}
3821/* ********************************************************* *
3822 * Functions defined below concern the OS part of the driver *
3823 * ********************************************************* */
3824
3825/**
3826 * s2io_open - open entry point of the driver
3827 * @dev : pointer to the device structure.
3828 * Description:
3829 * This function is the open entry point of the driver. It mainly calls a
3830 * function to allocate Rx buffers and inserts them into the buffer
3831 * descriptors and then enables the Rx part of the NIC.
3832 * Return value:
3833 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3834 * file on failure.
3835 */
3836
3837static int s2io_open(struct net_device *dev)
3838{
3839 struct s2io_nic *sp = dev->priv;
3840 int err = 0;
3841
3842 /*
3843 * Make sure the link is reported down by default every time
3844 * the NIC is initialized
3845 */
3846 netif_carrier_off(dev);
3847 sp->last_link_state = 0;
3848
3849 napi_enable(&sp->napi);
3850
3851 if (sp->intr_type == MSI_X) {
3852 int ret = s2io_enable_msi_x(sp);
3853
3854 if (!ret) {
3855 u16 msi_control;
3856
3857 ret = s2io_test_msi(sp);
3858
3859 /* rollback MSI-X, will re-enable during add_isr() */
3860 kfree(sp->entries);
3861 sp->mac_control.stats_info->sw_stat.mem_freed +=
3862 (MAX_REQUESTED_MSI_X *
3863 sizeof(struct msix_entry));
3864 kfree(sp->s2io_entries);
3865 sp->mac_control.stats_info->sw_stat.mem_freed +=
3866 (MAX_REQUESTED_MSI_X *
3867 sizeof(struct s2io_msix_entry));
3868 sp->entries = NULL;
3869 sp->s2io_entries = NULL;
3870
3871 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3872 msi_control &= 0xFFFE; /* Disable MSI */
3873 pci_write_config_word(sp->pdev, 0x42, msi_control);
3874
3875 pci_disable_msix(sp->pdev);
3876
3877 }
3878 if (ret) {
3879
3880 DBG_PRINT(ERR_DBG,
3881 "%s: MSI-X requested but failed to enable\n",
3882 dev->name);
3883 sp->intr_type = INTA;
3884 }
3885 }
3886
3887 /* NAPI doesn't work well with MSI(X) */
3888 if (sp->intr_type != INTA) {
3889 if(sp->config.napi)
3890 sp->config.napi = 0;
3891 }
3892
3893 /* Initialize H/W and enable interrupts */
3894 err = s2io_card_up(sp);
3895 if (err) {
3896 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3897 dev->name);
3898 goto hw_init_failed;
3899 }
3900
3901 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3902 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3903 s2io_card_down(sp);
3904 err = -ENODEV;
3905 goto hw_init_failed;
3906 }
3907
3908 netif_start_queue(dev);
3909 return 0;
3910
3911hw_init_failed:
3912 napi_disable(&sp->napi);
3913 if (sp->intr_type == MSI_X) {
3914 if (sp->entries) {
3915 kfree(sp->entries);
3916 sp->mac_control.stats_info->sw_stat.mem_freed
3917 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3918 }
3919 if (sp->s2io_entries) {
3920 kfree(sp->s2io_entries);
3921 sp->mac_control.stats_info->sw_stat.mem_freed
3922 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3923 }
3924 }
3925 return err;
3926}
3927
3928/**
3929 * s2io_close - close entry point of the driver
3930 * @dev : device pointer.
3931 * Description:
3932 * This is the stop entry point of the driver. It needs to undo exactly
3933 * whatever was done by the open entry point, thus it's usually referred to
3934 * as the close function. Among other things this function mainly stops the
3935 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3936 * Return value:
3937 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3938 * file on failure.
3939 */
3940
3941static int s2io_close(struct net_device *dev)
3942{
3943 struct s2io_nic *sp = dev->priv;
3944
3945 netif_stop_queue(dev);
3946 napi_disable(&sp->napi);
3947 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3948 s2io_card_down(sp);
3949
3950 return 0;
3951}
3952
3953/**
3954 * s2io_xmit - Tx entry point of the driver
3955 * @skb : the socket buffer containing the Tx data.
3956 * @dev : device pointer.
3957 * Description :
3958 * This function is the Tx entry point of the driver. S2IO NIC supports
3959 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3960 * NOTE: when the device cannot queue the packet, the trans_start variable
3961 * is simply not updated.
3962 * Return value:
3963 * 0 on success & 1 on failure.
3964 */
3965
3966static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3967{
3968 struct s2io_nic *sp = dev->priv;
3969 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3970 register u64 val64;
3971 struct TxD *txdp;
3972 struct TxFIFO_element __iomem *tx_fifo;
3973 unsigned long flags;
3974 u16 vlan_tag = 0;
3975 int vlan_priority = 0;
3976 struct mac_info *mac_control;
3977 struct config_param *config;
3978 int offload_type;
3979 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
3980
3981 mac_control = &sp->mac_control;
3982 config = &sp->config;
3983
3984 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3985
3986 if (unlikely(skb->len <= 0)) {
3987 DBG_PRINT(TX_DBG, "%s: Buffer has no data\n", dev->name);
3988 dev_kfree_skb_any(skb);
3989 return 0;
3990 }
3991
3992 spin_lock_irqsave(&sp->tx_lock, flags);
3993 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3994 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3995 dev->name);
3996 spin_unlock_irqrestore(&sp->tx_lock, flags);
3997 dev_kfree_skb(skb);
3998 return 0;
3999 }
4000
4001 queue = 0;
4002 /* Get FIFO number to transmit on, based on VLAN priority */
4003 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4004 vlan_tag = vlan_tx_tag_get(skb);
4005 vlan_priority = vlan_tag >> 13;
4006 queue = config->fifo_mapping[vlan_priority];
4007 }
4008
4009 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
4010 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
4011 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
4012 list_virt_addr;
4013
4014 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
4015 /* Avoid "put" pointer going beyond "get" pointer */
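 /* The ring is treated as full when the slot after "put" (modulo
  * queue_len) is "get": e.g. with a hypothetical queue_len of 8,
  * put_off 7 wraps to 0, so put 7 / get 0 means no free TxDs. */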
4016 if (txdp->Host_Control ||
4017 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4018 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4019 netif_stop_queue(dev);
4020 dev_kfree_skb(skb);
4021 spin_unlock_irqrestore(&sp->tx_lock, flags);
4022 return 0;
4023 }
4024
4025 offload_type = s2io_offload_type(skb);
4026 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4027 txdp->Control_1 |= TXD_TCP_LSO_EN;
4028 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4029 }
4030 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4031 txdp->Control_2 |=
4032 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4033 TXD_TX_CKO_UDP_EN);
4034 }
4035 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4036 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4037 txdp->Control_2 |= config->tx_intr_type;
4038
4039 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4040 txdp->Control_2 |= TXD_VLAN_ENABLE;
4041 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4042 }
4043
4044 frg_len = skb->len - skb->data_len;
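 /*
  * For UFO, TxD0 carries an 8-byte in-band header holding the IPv6
  * fragment ID. The #ifdef below presumably positions the 32-bit ID
  * so the big-endian device picks it up from the same dword
  * regardless of host endianness.
  */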
4045 if (offload_type == SKB_GSO_UDP) {
4046 int ufo_size;
4047
4048 ufo_size = s2io_udp_mss(skb);
4049 ufo_size &= ~7;
4050 txdp->Control_1 |= TXD_UFO_EN;
4051 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4052 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4053#ifdef __BIG_ENDIAN
4054 sp->ufo_in_band_v[put_off] =
4055 (u64)skb_shinfo(skb)->ip6_frag_id;
4056#else
4057 sp->ufo_in_band_v[put_off] =
4058 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4059#endif
4060 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
4061 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4062 sp->ufo_in_band_v,
4063 sizeof(u64), PCI_DMA_TODEVICE);
4064 if((txdp->Buffer_Pointer == 0) ||
4065 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4066 goto pci_map_failed;
4067 txdp++;
4068 }
4069
4070 txdp->Buffer_Pointer = pci_map_single
4071 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4072 if((txdp->Buffer_Pointer == 0) ||
4073 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4074 goto pci_map_failed;
4075
4076 txdp->Host_Control = (unsigned long) skb;
4077 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4078 if (offload_type == SKB_GSO_UDP)
4079 txdp->Control_1 |= TXD_UFO_EN;
4080
4081 frg_cnt = skb_shinfo(skb)->nr_frags;
4082 /* For fragmented SKB. */
4083 for (i = 0; i < frg_cnt; i++) {
4084 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4085 /* A '0' length fragment will be ignored */
4086 if (!frag->size)
4087 continue;
4088 txdp++;
4089 txdp->Buffer_Pointer = (u64) pci_map_page
4090 (sp->pdev, frag->page, frag->page_offset,
4091 frag->size, PCI_DMA_TODEVICE);
4092 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4093 if (offload_type == SKB_GSO_UDP)
4094 txdp->Control_1 |= TXD_UFO_EN;
4095 }
4096 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4097
4098 if (offload_type == SKB_GSO_UDP)
4099 frg_cnt++; /* as Txd0 was used for inband header */
4100
4101 tx_fifo = mac_control->tx_FIFO_start[queue];
4102 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
4103 writeq(val64, &tx_fifo->TxDL_Pointer);
4104
4105 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4106 TX_FIFO_LAST_LIST);
4107 if (offload_type)
4108 val64 |= TX_FIFO_SPECIAL_FUNC;
4109
4110 writeq(val64, &tx_fifo->List_Control);
4111
4112 mmiowb();
4113
4114 put_off++;
4115 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
4116 put_off = 0;
4117 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
4118
4119 /* Avoid "put" pointer going beyond "get" pointer */
4120 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4121 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4122 DBG_PRINT(TX_DBG,
4123 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4124 put_off, get_off);
4125 netif_stop_queue(dev);
4126 }
4127 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4128 dev->trans_start = jiffies;
4129 spin_unlock_irqrestore(&sp->tx_lock, flags);
4130
4131 return 0;
4132pci_map_failed:
4133 stats->pci_map_fail_cnt++;
4134 netif_stop_queue(dev);
4135 stats->mem_freed += skb->truesize;
4136 dev_kfree_skb(skb);
4137 spin_unlock_irqrestore(&sp->tx_lock, flags);
4138 return 0;
4139}
4140
4141static void
4142s2io_alarm_handle(unsigned long data)
4143{
4144 struct s2io_nic *sp = (struct s2io_nic *)data;
4145 struct net_device *dev = sp->dev;
4146
4147 s2io_handle_errors(dev);
4148 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4149}
4150
4151static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4152{
4153 int rxb_size, level;
4154
4155 if (!sp->lro) {
4156 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4157 level = rx_buffer_level(sp, rxb_size, rng_n);
4158
4159 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4160 int ret;
4161 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4162 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4163 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4164 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4165 __FUNCTION__);
4166 clear_bit(0, (&sp->tasklet_status));
4167 return -1;
4168 }
4169 clear_bit(0, (&sp->tasklet_status));
4170 } else if (level == LOW)
4171 tasklet_schedule(&sp->task);
4172
4173 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4174 DBG_PRINT(INFO_DBG, "%s: Out of memory", sp->dev->name);
4175 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4176 }
4177 return 0;
4178}
4179
4180static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4181{
4182 struct ring_info *ring = (struct ring_info *)dev_id;
4183 struct s2io_nic *sp = ring->nic;
4184
4185 atomic_inc(&sp->isr_cnt);
4186
4187 rx_intr_handler(ring);
4188 s2io_chk_rx_buffers(sp, ring->ring_no);
4189
4190 atomic_dec(&sp->isr_cnt);
4191 return IRQ_HANDLED;
4192}
4193
4194static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4195{
4196 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4197 struct s2io_nic *sp = fifo->nic;
4198
4199 atomic_inc(&sp->isr_cnt);
4200 tx_intr_handler(fifo);
4201 atomic_dec(&sp->isr_cnt);
4202 return IRQ_HANDLED;
4203}
4204static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4205{
4206 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4207 u64 val64;
4208
4209 val64 = readq(&bar0->pic_int_status);
4210 if (val64 & PIC_INT_GPIO) {
4211 val64 = readq(&bar0->gpio_int_reg);
4212 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4213 (val64 & GPIO_INT_REG_LINK_UP)) {
4214 /*
4215 * This is an unstable state, so clear both the up and down
4216 * interrupts and let the adapter re-evaluate the link state.
4217 */
4218 val64 |= GPIO_INT_REG_LINK_DOWN;
4219 val64 |= GPIO_INT_REG_LINK_UP;
4220 writeq(val64, &bar0->gpio_int_reg);
4221 val64 = readq(&bar0->gpio_int_mask);
4222 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4223 GPIO_INT_MASK_LINK_DOWN);
4224 writeq(val64, &bar0->gpio_int_mask);
4225 }
4226 else if (val64 & GPIO_INT_REG_LINK_UP) {
4227 val64 = readq(&bar0->adapter_status);
4228 /* Enable Adapter */
4229 val64 = readq(&bar0->adapter_control);
4230 val64 |= ADAPTER_CNTL_EN;
4231 writeq(val64, &bar0->adapter_control);
4232 val64 |= ADAPTER_LED_ON;
4233 writeq(val64, &bar0->adapter_control);
4234 if (!sp->device_enabled_once)
4235 sp->device_enabled_once = 1;
4236
4237 s2io_link(sp, LINK_UP);
4238 /*
4239 * unmask link down interrupt and mask link-up
4240 * intr
4241 */
4242 val64 = readq(&bar0->gpio_int_mask);
4243 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4244 val64 |= GPIO_INT_MASK_LINK_UP;
4245 writeq(val64, &bar0->gpio_int_mask);
4246
4247 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4248 val64 = readq(&bar0->adapter_status);
4249 s2io_link(sp, LINK_DOWN);
4250 /* Link is down, so unmask the link up interrupt */
4251 val64 = readq(&bar0->gpio_int_mask);
4252 val64 &= ~GPIO_INT_MASK_LINK_UP;
4253 val64 |= GPIO_INT_MASK_LINK_DOWN;
4254 writeq(val64, &bar0->gpio_int_mask);
4255
4256 /* turn off LED */
4257 val64 = readq(&bar0->adapter_control);
4258 val64 = val64 &(~ADAPTER_LED_ON);
4259 writeq(val64, &bar0->adapter_control);
4260 }
4261 }
4262 val64 = readq(&bar0->gpio_int_mask);
4263}
4264
4265/**
4266 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4267 * @value: alarm bits
4268 * @addr: address value
4269 * @cnt: counter variable
4270 * Description: Check for alarm and increment the counter
4271 * Return Value:
4272 * 1 - if alarm bit set
4273 * 0 - if alarm bit is not set
4274 */
4275int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4276 unsigned long long *cnt)
4277{
4278 u64 val64;
4279 val64 = readq(addr);
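 /* The alarm registers are evidently write-to-clear: writing back
  * the value just read acknowledges the asserted alarm bits. */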
4280 if (val64 & value) {
4281 writeq(val64, addr);
4282 (*cnt)++;
4283 return 1;
4284 }
4285 return 0;
4286
4287}
4288
4289/**
4290 * s2io_handle_errors - Xframe error indication handler
4291 * @nic: device private variable
4292 * Description: Handle alarms such as loss of link, single or
4293 * double ECC errors, critical and serious errors.
4294 * Return Value:
4295 * NONE
4296 */
4297static void s2io_handle_errors(void * dev_id)
4298{
4299 struct net_device *dev = (struct net_device *) dev_id;
4300 struct s2io_nic *sp = dev->priv;
4301 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4302 u64 temp64 = 0,val64=0;
4303 int i = 0;
4304
4305 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4306 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4307
4308 if (unlikely(atomic_read(&sp->card_state) == CARD_DOWN))
4309 return;
4310
4311 if (pci_channel_offline(sp->pdev))
4312 return;
4313
4314 memset(&sw_stat->ring_full_cnt, 0,
4315 sizeof(sw_stat->ring_full_cnt));
4316
4317 /* Handling the XPAK counters update */
4318 if(stats->xpak_timer_count < 72000) {
4319 /* waiting for an hour */
4320 stats->xpak_timer_count++;
4321 } else {
4322 s2io_updt_xpak_counter(dev);
4323 /* reset the count to zero */
4324 stats->xpak_timer_count = 0;
4325 }
4326
4327 /* Handling link status change error Intr */
4328 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4329 val64 = readq(&bar0->mac_rmac_err_reg);
4330 writeq(val64, &bar0->mac_rmac_err_reg);
4331 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4332 schedule_work(&sp->set_link_task);
4333 }
4334
4335 /* In case of a serious error, the device will be Reset. */
4336 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4337 &sw_stat->serious_err_cnt))
4338 goto reset;
4339
4340 /* Check for data parity error */
4341 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4342 &sw_stat->parity_err_cnt))
4343 goto reset;
4344
4345 /* Check for ring full counter */
4346 if (sp->device_type == XFRAME_II_DEVICE) {
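 /* Each 64-bit ring_bump_counter register packs four 16-bit per-ring
  * counters, most significant field first; the mask/shift below
  * extracts field i (rings 0-3, then rings 4-7). */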
4347 val64 = readq(&bar0->ring_bump_counter1);
4348 for (i=0; i<4; i++) {
4349 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4350 temp64 >>= 64 - ((i+1)*16);
4351 sw_stat->ring_full_cnt[i] += temp64;
4352 }
4353
4354 val64 = readq(&bar0->ring_bump_counter2);
4355 for (i=0; i<4; i++) {
4356 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4357 temp64 >>= 64 - ((i+1)*16);
4358 sw_stat->ring_full_cnt[i+4] += temp64;
4359 }
4360 }
4361
4362 val64 = readq(&bar0->txdma_int_status);
4363 /*check for pfc_err*/
4364 if (val64 & TXDMA_PFC_INT) {
4365 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4366 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4367 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4368 &sw_stat->pfc_err_cnt))
4369 goto reset;
4370 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4371 &sw_stat->pfc_err_cnt);
4372 }
4373
4374 /*check for tda_err*/
4375 if (val64 & TXDMA_TDA_INT) {
4376 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4377 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4378 &sw_stat->tda_err_cnt))
4379 goto reset;
4380 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4381 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4382 }
4383 /*check for pcc_err*/
4384 if (val64 & TXDMA_PCC_INT) {
4385 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4386 | PCC_N_SERR | PCC_6_COF_OV_ERR
4387 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4388 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4389 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4390 &sw_stat->pcc_err_cnt))
4391 goto reset;
4392 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4393 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4394 }
4395
4396 /*check for tti_err*/
4397 if (val64 & TXDMA_TTI_INT) {
4398 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4399 &sw_stat->tti_err_cnt))
4400 goto reset;
4401 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4402 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4403 }
4404
4405 /*check for lso_err*/
4406 if (val64 & TXDMA_LSO_INT) {
4407 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4408 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4409 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4410 goto reset;
4411 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4412 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4413 }
4414
4415 /*check for tpa_err*/
4416 if (val64 & TXDMA_TPA_INT) {
4417 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4418 &sw_stat->tpa_err_cnt))
4419 goto reset;
4420 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4421 &sw_stat->tpa_err_cnt);
4422 }
4423
4424 /*check for sm_err*/
4425 if (val64 & TXDMA_SM_INT) {
4426 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4427 &sw_stat->sm_err_cnt))
4428 goto reset;
4429 }
4430
4431 val64 = readq(&bar0->mac_int_status);
4432 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4433 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4434 &bar0->mac_tmac_err_reg,
4435 &sw_stat->mac_tmac_err_cnt))
4436 goto reset;
4437 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4438 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4439 &bar0->mac_tmac_err_reg,
4440 &sw_stat->mac_tmac_err_cnt);
4441 }
4442
4443 val64 = readq(&bar0->xgxs_int_status);
4444 if (val64 & XGXS_INT_STATUS_TXGXS) {
4445 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4446 &bar0->xgxs_txgxs_err_reg,
4447 &sw_stat->xgxs_txgxs_err_cnt))
4448 goto reset;
4449 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4450 &bar0->xgxs_txgxs_err_reg,
4451 &sw_stat->xgxs_txgxs_err_cnt);
4452 }
4453
4454 val64 = readq(&bar0->rxdma_int_status);
4455 if (val64 & RXDMA_INT_RC_INT_M) {
4456 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4457 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4458 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4459 goto reset;
4460 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4461 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4462 &sw_stat->rc_err_cnt);
4463 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4464 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4465 &sw_stat->prc_pcix_err_cnt))
4466 goto reset;
4467 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4468 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4469 &sw_stat->prc_pcix_err_cnt);
4470 }
4471
4472 if (val64 & RXDMA_INT_RPA_INT_M) {
4473 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4474 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4475 goto reset;
4476 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4477 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4478 }
4479
4480 if (val64 & RXDMA_INT_RDA_INT_M) {
4481 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4482 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4483 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4484 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4485 goto reset;
4486 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4487 | RDA_MISC_ERR | RDA_PCIX_ERR,
4488 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4489 }
4490
4491 if (val64 & RXDMA_INT_RTI_INT_M) {
4492 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4493 &sw_stat->rti_err_cnt))
4494 goto reset;
4495 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4496 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4497 }
4498
4499 val64 = readq(&bar0->mac_int_status);
4500 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4501 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4502 &bar0->mac_rmac_err_reg,
4503 &sw_stat->mac_rmac_err_cnt))
4504 goto reset;
4505 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4506 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4507 &sw_stat->mac_rmac_err_cnt);
4508 }
4509
4510 val64 = readq(&bar0->xgxs_int_status);
4511 if (val64 & XGXS_INT_STATUS_RXGXS) {
4512 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4513 &bar0->xgxs_rxgxs_err_reg,
4514 &sw_stat->xgxs_rxgxs_err_cnt))
4515 goto reset;
4516 }
4517
4518 val64 = readq(&bar0->mc_int_status);
4519 if(val64 & MC_INT_STATUS_MC_INT) {
4520 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4521 &sw_stat->mc_err_cnt))
4522 goto reset;
4523
4524 /* Handling Ecc errors */
4525 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4526 writeq(val64, &bar0->mc_err_reg);
4527 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4528 sw_stat->double_ecc_errs++;
4529 if (sp->device_type != XFRAME_II_DEVICE) {
4530 /*
4531 * Reset XframeI only if critical error
4532 */
4533 if (val64 &
4534 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4535 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4536 goto reset;
4537 }
4538 } else
4539 sw_stat->single_ecc_errs++;
4540 }
4541 }
4542 return;
4543
4544reset:
4545 netif_stop_queue(dev);
4546 schedule_work(&sp->rst_timer_task);
4547 sw_stat->soft_reset_cnt++;
4548 return;
4549}
4550
4551/**
4552 * s2io_isr - ISR handler of the device.
4553 * @irq: the irq of the device.
4554 * @dev_id: a void pointer to the dev structure of the NIC.
4555 * Description: This function is the ISR handler of the device. It
4556 * identifies the reason for the interrupt and calls the relevant
4557 * service routines. As a contingency measure, this ISR allocates the
4558 * recv buffers, if their numbers are below the panic value which is
4559 * presently set to 25% of the original number of rcv buffers allocated.
4560 * Return value:
4561 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4562 * IRQ_NONE: will be returned if interrupt is not from our device
4563 */
4564static irqreturn_t s2io_isr(int irq, void *dev_id)
4565{
4566 struct net_device *dev = (struct net_device *) dev_id;
4567 struct s2io_nic *sp = dev->priv;
4568 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4569 int i;
4570 u64 reason = 0;
4571 struct mac_info *mac_control;
4572 struct config_param *config;
4573
4574 /* Pretend we handled any irq's from a disconnected card */
4575 if (pci_channel_offline(sp->pdev))
4576 return IRQ_NONE;
4577
4578 atomic_inc(&sp->isr_cnt);
4579 mac_control = &sp->mac_control;
4580 config = &sp->config;
4581
4582 /*
4583 * Identify the cause for interrupt and call the appropriate
4584 * interrupt handler. Causes for the interrupt could be:
4585 * 1. Rx of packet.
4586 * 2. Tx complete.
4587 * 3. Link down.
4588 * 4. Error in any functional blocks of the NIC.
4589 */
4590 reason = readq(&bar0->general_int_status);
4591
4592 if (!reason) {
4593 /* The interrupt was not raised by us. */
4594 atomic_dec(&sp->isr_cnt);
4595 return IRQ_NONE;
4596 }
4597 else if (unlikely(reason == S2IO_MINUS_ONE)) {
4598 /* Disable device and get out */
4599 atomic_dec(&sp->isr_cnt);
4600 return IRQ_NONE;
4601 }
4602
4603 if (napi) {
4604 if (reason & GEN_INTR_RXTRAFFIC) {
4605 if (likely (netif_rx_schedule_prep(dev, &sp->napi))) {
4606 __netif_rx_schedule(dev, &sp->napi);
4607 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4608 }
4609 else
4610 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4611 }
4612 } else {
4613 /*
4614 * Rx handler is called by default, without checking for the
4615 * cause of interrupt.
4616 * rx_traffic_int reg is an R1 register, writing all 1's
4617 * will ensure that the actual interrupt-causing bit gets
4618 * cleared and hence a read can be avoided.
4619 */
4620 if (reason & GEN_INTR_RXTRAFFIC)
4621 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4622
4623 for (i = 0; i < config->rx_ring_num; i++) {
4624 rx_intr_handler(&mac_control->rings[i]);
4625 }
4626 }
4627
4628 /*
4629 * tx_traffic_int reg is an R1 register, writing all 1's
4630 * will ensure that the actual interrupt-causing bit gets
4631 * cleared and hence a read can be avoided.
4632 */
4633 if (reason & GEN_INTR_TXTRAFFIC)
4634 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4635
4636 for (i = 0; i < config->tx_fifo_num; i++)
4637 tx_intr_handler(&mac_control->fifos[i]);
4638
4639 if (reason & GEN_INTR_TXPIC)
4640 s2io_txpic_intr_handle(sp);
4641 /*
4642 * If the Rx buffer count is below the panic threshold then
4643 * reallocate the buffers from the interrupt handler itself,
4644 * else schedule a tasklet to reallocate the buffers.
4645 */
4646 if (!napi) {
4647 for (i = 0; i < config->rx_ring_num; i++)
4648 s2io_chk_rx_buffers(sp, i);
4649 }
4650
4651 writeq(0, &bar0->general_int_mask);
4652 readl(&bar0->general_int_status);
4653
4654 atomic_dec(&sp->isr_cnt);
4655 return IRQ_HANDLED;
4656}
4657
4658/**
4659 * s2io_updt_stats - trigger a one-shot update of the hardware statistics block
4660 */
4661static void s2io_updt_stats(struct s2io_nic *sp)
4662{
4663 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4664 u64 val64;
4665 int cnt = 0;
4666
4667 if (atomic_read(&sp->card_state) == CARD_UP) {
4668 /* Approx. 30us on a 133 MHz bus */
4669 val64 = SET_UPDT_CLICKS(10) |
4670 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4671 writeq(val64, &bar0->stat_cfg);
4672 do {
4673 udelay(100);
4674 val64 = readq(&bar0->stat_cfg);
4675 if (!(val64 & BIT(0)))
4676 break;
4677 cnt++;
4678 if (cnt == 5)
4679 break; /* Updt failed */
4680 } while(1);
4681 }
4682}
4683
4684/**
4685 * s2io_get_stats - Updates the device statistics structure.
4686 * @dev : pointer to the device structure.
4687 * Description:
4688 * This function updates the device statistics structure in the s2io_nic
4689 * structure and returns a pointer to the same.
4690 * Return value:
4691 * pointer to the updated net_device_stats structure.
4692 */
4693
4694static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4695{
4696 struct s2io_nic *sp = dev->priv;
4697 struct mac_info *mac_control;
4698 struct config_param *config;
4699
4700
4701 mac_control = &sp->mac_control;
4702 config = &sp->config;
4703
4704 /* Configure Stats for immediate updt */
4705 s2io_updt_stats(sp);
4706
4707 sp->stats.tx_packets =
4708 le32_to_cpu(mac_control->stats_info->tmac_frms);
4709 sp->stats.tx_errors =
4710 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4711 sp->stats.rx_errors =
4712 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4713 sp->stats.multicast =
4714 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4715 sp->stats.rx_length_errors =
4716 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4717
4718 return (&sp->stats);
4719}
4720
4721/**
4722 * s2io_set_multicast - entry point for multicast address enable/disable.
4723 * @dev : pointer to the device structure
4724 * Description:
4725 * This function is a driver entry point which gets called by the kernel
4726 * whenever multicast addresses must be enabled/disabled. This also gets
4727 * called to set/reset promiscuous mode. Depending on the device flags, we
4728 * determine whether multicast addresses must be enabled or promiscuous mode
4729 * is to be disabled, etc.
4730 * Return value:
4731 * void.
4732 */
4733
4734static void s2io_set_multicast(struct net_device *dev)
4735{
4736 int i, j, prev_cnt;
4737 struct dev_mc_list *mclist;
4738 struct s2io_nic *sp = dev->priv;
4739 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4740 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4741 0xfeffffffffffULL;
4742 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4743 void __iomem *add;
4744
4745 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4746 /* Enable all Multicast addresses */
4747 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4748 &bar0->rmac_addr_data0_mem);
4749 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4750 &bar0->rmac_addr_data1_mem);
4751 val64 = RMAC_ADDR_CMD_MEM_WE |
4752 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4753 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4754 writeq(val64, &bar0->rmac_addr_cmd_mem);
4755 /* Wait till command completes */
4756 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4757 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4758 S2IO_BIT_RESET);
4759
4760 sp->m_cast_flg = 1;
4761 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4762 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4763 /* Disable all Multicast addresses */
4764 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4765 &bar0->rmac_addr_data0_mem);
4766 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4767 &bar0->rmac_addr_data1_mem);
4768 val64 = RMAC_ADDR_CMD_MEM_WE |
4769 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4770 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4771 writeq(val64, &bar0->rmac_addr_cmd_mem);
4772 /* Wait till command completes */
4773 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4774 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4775 S2IO_BIT_RESET);
4776
4777 sp->m_cast_flg = 0;
4778 sp->all_multi_pos = 0;
4779 }
4780
4781 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4782 /* Put the NIC into promiscuous mode */
4783 add = &bar0->mac_cfg;
4784 val64 = readq(&bar0->mac_cfg);
4785 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4786
4787 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4788 writel((u32) val64, add);
4789 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4790 writel((u32) (val64 >> 32), (add + 4));
4791
4792 if (vlan_tag_strip != 1) {
4793 val64 = readq(&bar0->rx_pa_cfg);
4794 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4795 writeq(val64, &bar0->rx_pa_cfg);
4796 vlan_strip_flag = 0;
4797 }
4798
4799 val64 = readq(&bar0->mac_cfg);
4800 sp->promisc_flg = 1;
4801 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4802 dev->name);
4803 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4804 /* Remove the NIC from promiscuous mode */
4805 add = &bar0->mac_cfg;
4806 val64 = readq(&bar0->mac_cfg);
4807 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4808
4809 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4810 writel((u32) val64, add);
4811 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4812 writel((u32) (val64 >> 32), (add + 4));
4813
4814 if (vlan_tag_strip != 0) {
4815 val64 = readq(&bar0->rx_pa_cfg);
4816 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4817 writeq(val64, &bar0->rx_pa_cfg);
4818 vlan_strip_flag = 1;
4819 }
4820
4821 val64 = readq(&bar0->mac_cfg);
4822 sp->promisc_flg = 0;
4823 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4824 dev->name);
4825 }
4826
4827 /* Update individual M_CAST address list */
4828 if ((!sp->m_cast_flg) && dev->mc_count) {
4829 if (dev->mc_count >
4830 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4831 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4832 dev->name);
4833 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4834 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4835 return;
4836 }
4837
4838 prev_cnt = sp->mc_addr_count;
4839 sp->mc_addr_count = dev->mc_count;
4840
4841 /* Clear out the previous list of Mc in the H/W. */
4842 for (i = 0; i < prev_cnt; i++) {
4843 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4844 &bar0->rmac_addr_data0_mem);
4845 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4846 &bar0->rmac_addr_data1_mem);
4847 val64 = RMAC_ADDR_CMD_MEM_WE |
4848 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4849 RMAC_ADDR_CMD_MEM_OFFSET
4850 (MAC_MC_ADDR_START_OFFSET + i);
4851 writeq(val64, &bar0->rmac_addr_cmd_mem);
4852
4853 /* Wait till command completes */
4854 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4855 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4856 S2IO_BIT_RESET)) {
4857 DBG_PRINT(ERR_DBG, "%s: Adding ",
4858 dev->name);
4859 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4860 return;
4861 }
4862 }
4863
4864 /* Create the new Rx filter list and update the same in H/W. */
4865 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4866 i++, mclist = mclist->next) {
4867 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4868 ETH_ALEN);
4869 mac_addr = 0;
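 /* Pack the 6-byte multicast address MSB-first into the low 48
  * bits of mac_addr; the final ">>= 8" undoes the extra shift
  * left over from the last loop iteration. */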
4870 for (j = 0; j < ETH_ALEN; j++) {
4871 mac_addr |= mclist->dmi_addr[j];
4872 mac_addr <<= 8;
4873 }
4874 mac_addr >>= 8;
4875 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4876 &bar0->rmac_addr_data0_mem);
4877 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4878 &bar0->rmac_addr_data1_mem);
4879 val64 = RMAC_ADDR_CMD_MEM_WE |
4880 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4881 RMAC_ADDR_CMD_MEM_OFFSET
4882 (i + MAC_MC_ADDR_START_OFFSET);
4883 writeq(val64, &bar0->rmac_addr_cmd_mem);
4884
4885 /* Wait till command completes */
4886 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4887 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4888 S2IO_BIT_RESET)) {
4889 DBG_PRINT(ERR_DBG, "%s: Adding ",
4890 dev->name);
4891 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4892 return;
4893 }
4894 }
4895 }
4896}
4897
4898/**
4899 * s2io_set_mac_addr - Programs the Xframe mac address
4900 * @dev : pointer to the device structure.
4901 * @addr: a uchar pointer to the new mac address which is to be set.
4902 * Description : This procedure will program the Xframe to receive
4903 * frames with the new MAC address
4904 * Return value: SUCCESS on success and an appropriate (-)ve integer
4905 * as defined in errno.h file on failure.
4906 */
4907
4908static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4909{
4910 struct s2io_nic *sp = dev->priv;
4911 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4912 register u64 val64, mac_addr = 0;
4913 int i;
4914 u64 old_mac_addr = 0;
4915
4916 /*
4917 * Set the new MAC address as the new unicast filter and reflect this
4918 * change on the device address registered with the OS. It will be
4919 * at offset 0.
4920 */
4921 for (i = 0; i < ETH_ALEN; i++) {
4922 mac_addr <<= 8;
4923 mac_addr |= addr[i];
4924 old_mac_addr <<= 8;
4925 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4926 }
4927
4928 if(0 == mac_addr)
4929 return SUCCESS;
4930
4931 /* Update the internal structure with this new mac address */
4932 if(mac_addr != old_mac_addr) {
4933 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4934 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4935 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4936 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4937 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4938 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4939 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4940 }
4941
4942 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4943 &bar0->rmac_addr_data0_mem);
4944
4945 val64 =
4946 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4947 RMAC_ADDR_CMD_MEM_OFFSET(0);
4948 writeq(val64, &bar0->rmac_addr_cmd_mem);
4949 /* Wait till command completes */
4950 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4951 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4952 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4953 return FAILURE;
4954 }
4955
4956 return SUCCESS;
4957}
4958
4959/**
4960 * s2io_ethtool_sset - Sets different link parameters.
4961 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4962 * @info: pointer to the structure with parameters given by ethtool to set
4963 * link information.
4964 * Description:
4965 * The function sets different link parameters provided by the user onto
4966 * the NIC.
4967 * Return value:
4968 * 0 on success.
4969*/
4970
4971static int s2io_ethtool_sset(struct net_device *dev,
4972 struct ethtool_cmd *info)
4973{
4974 struct s2io_nic *sp = dev->priv;
4975 if ((info->autoneg == AUTONEG_ENABLE) ||
4976 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4977 return -EINVAL;
4978 else {
4979 s2io_close(sp->dev);
4980 s2io_open(sp->dev);
4981 }
4982
4983 return 0;
4984}
4985
4986/**
4987 * s2io_ethtool_gset - Return link specific information.
4988 * @sp : private member of the device structure, pointer to the
4989 * s2io_nic structure.
4990 * @info : pointer to the structure with parameters given by ethtool
4991 * to return link information.
4992 * Description:
4993 * Returns link specific information like speed, duplex etc.. to ethtool.
4994 * Return value :
4995 * return 0 on success.
4996 */
4997
4998static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4999{
5000 struct s2io_nic *sp = dev->priv;
5001 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5002 info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
5003 info->port = PORT_FIBRE;
5004 /* info->transceiver?? TODO */
5005
5006 if (netif_carrier_ok(sp->dev)) {
5007 info->speed = 10000;
5008 info->duplex = DUPLEX_FULL;
5009 } else {
5010 info->speed = -1;
5011 info->duplex = -1;
5012 }
5013
5014 info->autoneg = AUTONEG_DISABLE;
5015 return 0;
5016}
5017
5018/**
5019 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5020 * @sp : private member of the device structure, which is a pointer to the
5021 * s2io_nic structure.
5022 * @info : pointer to the structure with parameters given by ethtool to
5023 * return driver information.
5024 * Description:
5025 * Returns driver specific information like name, version, etc. to ethtool.
5026 * Return value:
5027 * void
5028 */
5029
5030static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5031 struct ethtool_drvinfo *info)
5032{
5033 struct s2io_nic *sp = dev->priv;
5034
5035 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5036 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5037 strncpy(info->fw_version, "", sizeof(info->fw_version));
5038 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5039 info->regdump_len = XENA_REG_SPACE;
5040 info->eedump_len = XENA_EEPROM_SPACE;
5041 info->testinfo_len = S2IO_TEST_LEN;
5042
5043 if (sp->device_type == XFRAME_I_DEVICE)
5044 info->n_stats = XFRAME_I_STAT_LEN;
5045 else
5046 info->n_stats = XFRAME_II_STAT_LEN;
5047}
5048
5049/**
5050 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
5051 * @sp: private member of the device structure, which is a pointer to the
5052 * s2io_nic structure.
5053 * @regs : pointer to the structure with parameters given by ethtool for
5054 * dumping the registers.
5055 * @reg_space: The input argument into which all the registers are dumped.
5056 * Description:
5057 * Dumps the entire register space of the Xframe NIC into the user given
5058 * buffer area.
5059 * Return value :
5060 * void .
5061*/
5062
5063static void s2io_ethtool_gregs(struct net_device *dev,
5064 struct ethtool_regs *regs, void *space)
5065{
5066 int i;
5067 u64 reg;
5068 u8 *reg_space = (u8 *) space;
5069 struct s2io_nic *sp = dev->priv;
5070
5071 regs->len = XENA_REG_SPACE;
5072 regs->version = sp->pdev->subsystem_device;
5073
5074 for (i = 0; i < regs->len; i += 8) {
5075 reg = readq(sp->bar0 + i);
5076 memcpy((reg_space + i), &reg, 8);
5077 }
5078}
5079
5080/**
5081 * s2io_phy_id - timer function that alternates adapter LED.
5082 * @data : address of the private member of the device structure, which
5083 * is a pointer to the s2io_nic structure, cast to an unsigned long.
5084 * Description: This is the timer function that toggles the adapter LED
5085 * bit in the adapter control register on every invocation, so the LED
5086 * alternately turns on and off. The timer is set for 1/2 a second,
5087 * hence the LED blinks once every second.
5088*/
5089static void s2io_phy_id(unsigned long data)
5090{
5091 struct s2io_nic *sp = (struct s2io_nic *) data;
5092 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5093 u64 val64 = 0;
5094 u16 subid;
5095
5096 subid = sp->pdev->subsystem_device;
5097 if ((sp->device_type == XFRAME_II_DEVICE) ||
5098 ((subid & 0xFF) >= 0x07)) {
5099 val64 = readq(&bar0->gpio_control);
5100 val64 ^= GPIO_CTRL_GPIO_0;
5101 writeq(val64, &bar0->gpio_control);
5102 } else {
5103 val64 = readq(&bar0->adapter_control);
5104 val64 ^= ADAPTER_LED_ON;
5105 writeq(val64, &bar0->adapter_control);
5106 }
5107
5108 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5109}
5110
5111/**
5112 * s2io_ethtool_idnic - To physically identify the nic on the system.
5113 * @sp : private member of the device structure, which is a pointer to the
5114 * s2io_nic structure.
5115 * @id : pointer to the structure with identification parameters given by
5116 * ethtool.
5117 * Description: Used to physically identify the NIC on the system.
5118 * The Link LED will blink for a time specified by the user for
5119 * identification.
5120 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5121 * identification is possible only if its link is up.
5122 * Return value:
5123 * int , returns 0 on success
5124 */
5125
5126static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5127{
5128 u64 val64 = 0, last_gpio_ctrl_val;
5129 struct s2io_nic *sp = dev->priv;
5130 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5131 u16 subid;
5132
5133 subid = sp->pdev->subsystem_device;
5134 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5135 if ((sp->device_type == XFRAME_I_DEVICE) &&
5136 ((subid & 0xFF) < 0x07)) {
5137 val64 = readq(&bar0->adapter_control);
5138 if (!(val64 & ADAPTER_CNTL_EN)) {
5139 printk(KERN_ERR
5140 "Adapter Link down, cannot blink LED\n");
5141 return -EFAULT;
5142 }
5143 }
5144 if (sp->id_timer.function == NULL) {
5145 init_timer(&sp->id_timer);
5146 sp->id_timer.function = s2io_phy_id;
5147 sp->id_timer.data = (unsigned long) sp;
5148 }
5149 mod_timer(&sp->id_timer, jiffies);
5150 if (data)
5151 msleep_interruptible(data * HZ);
5152 else
5153 msleep_interruptible(MAX_FLICKER_TIME);
5154 del_timer_sync(&sp->id_timer);
5155
5156 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5157 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5158 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5159 }
5160
5161 return 0;
5162}
5163
5164static void s2io_ethtool_gringparam(struct net_device *dev,
5165 struct ethtool_ringparam *ering)
5166{
5167 struct s2io_nic *sp = dev->priv;
5168 int i,tx_desc_count=0,rx_desc_count=0;
5169
5170 if (sp->rxd_mode == RXD_MODE_1)
5171 ering->rx_max_pending = MAX_RX_DESC_1;
5172 else if (sp->rxd_mode == RXD_MODE_3B)
5173 ering->rx_max_pending = MAX_RX_DESC_2;
5174
5175 ering->tx_max_pending = MAX_TX_DESC;
5176 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5177 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5178
5179 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5180 ering->tx_pending = tx_desc_count;
5181 rx_desc_count = 0;
5182 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5183 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5184
5185 ering->rx_pending = rx_desc_count;
5186
5187 ering->rx_mini_max_pending = 0;
5188 ering->rx_mini_pending = 0;
5189 if(sp->rxd_mode == RXD_MODE_1)
5190 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5191 else if (sp->rxd_mode == RXD_MODE_3B)
5192 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5193 ering->rx_jumbo_pending = rx_desc_count;
5194}
5195
5196/**
5197 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5198 * @sp : private member of the device structure, which is a pointer to the
5199 * s2io_nic structure.
5200 * @ep : pointer to the structure with pause parameters given by ethtool.
5201 * Description:
5202 * Returns the Pause frame generation and reception capability of the NIC.
5203 * Return value:
5204 * void
5205 */
5206static void s2io_ethtool_getpause_data(struct net_device *dev,
5207 struct ethtool_pauseparam *ep)
5208{
5209 u64 val64;
5210 struct s2io_nic *sp = dev->priv;
5211 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5212
5213 val64 = readq(&bar0->rmac_pause_cfg);
5214 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5215 ep->tx_pause = TRUE;
5216 if (val64 & RMAC_PAUSE_RX_ENABLE)
5217 ep->rx_pause = TRUE;
5218 ep->autoneg = FALSE;
5219}
5220
5221/**
5222 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5223 * @sp : private member of the device structure, which is a pointer to the
5224 * s2io_nic structure.
5225 * @ep : pointer to the structure with pause parameters given by ethtool.
5226 * Description:
5227 * It can be used to set or reset Pause frame generation or reception
5228 * support of the NIC.
5229 * Return value:
5230 * int, returns 0 on Success
5231 */
5232
5233static int s2io_ethtool_setpause_data(struct net_device *dev,
5234 struct ethtool_pauseparam *ep)
5235{
5236 u64 val64;
5237 struct s2io_nic *sp = dev->priv;
5238 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5239
5240 val64 = readq(&bar0->rmac_pause_cfg);
5241 if (ep->tx_pause)
5242 val64 |= RMAC_PAUSE_GEN_ENABLE;
5243 else
5244 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5245 if (ep->rx_pause)
5246 val64 |= RMAC_PAUSE_RX_ENABLE;
5247 else
5248 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5249 writeq(val64, &bar0->rmac_pause_cfg);
5250 return 0;
5251}
5252
5253/**
5254 * read_eeprom - reads 4 bytes of data from user given offset.
5255 * @sp : private member of the device structure, which is a pointer to the
5256 * s2io_nic structure.
5257 * @off : offset at which the data must be written
5258 * @data : Its an output parameter where the data read at the given
5259 * offset is stored.
5260 * Description:
5261 * Will read 4 bytes of data from the user given offset and return the
5262 * read data.
5263 * NOTE: Only the part of the EEPROM visible through the
5264 * I2C bus can be read.
5265 * Return value:
5266 * -1 on failure and 0 on success.
5267 */
5268
5269#define S2IO_DEV_ID 5
5270static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5271{
5272 int ret = -1;
5273 u32 exit_cnt = 0;
5274 u64 val64;
5275 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5276
5277 if (sp->device_type == XFRAME_I_DEVICE) {
5278 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5279 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5280 I2C_CONTROL_CNTL_START;
5281 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5282
5283 while (exit_cnt < 5) {
5284 val64 = readq(&bar0->i2c_control);
5285 if (I2C_CONTROL_CNTL_END(val64)) {
5286 *data = I2C_CONTROL_GET_DATA(val64);
5287 ret = 0;
5288 break;
5289 }
5290 msleep(50);
5291 exit_cnt++;
5292 }
5293 }
5294
5295 if (sp->device_type == XFRAME_II_DEVICE) {
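 /* Xframe II keeps its EEPROM behind a SPI controller: issue a
  * 3-byte read at the given offset (command 0x3 here matches the
  * common SPI READ opcode), then poll spi_control for DONE or
  * NACK. */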
5296 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5297 SPI_CONTROL_BYTECNT(0x3) |
5298 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5299 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5300 val64 |= SPI_CONTROL_REQ;
5301 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5302 while (exit_cnt < 5) {
5303 val64 = readq(&bar0->spi_control);
5304 if (val64 & SPI_CONTROL_NACK) {
5305 ret = 1;
5306 break;
5307 } else if (val64 & SPI_CONTROL_DONE) {
5308 *data = readq(&bar0->spi_data);
5309 *data &= 0xffffff;
5310 ret = 0;
5311 break;
5312 }
5313 msleep(50);
5314 exit_cnt++;
5315 }
5316 }
5317 return ret;
5318}
5319
5320/**
5321 * write_eeprom - actually writes the relevant part of the data value.
5322 * @sp : private member of the device structure, which is a pointer to the
5323 * s2io_nic structure.
5324 * @off : offset at which the data must be written
5325 * @data : The data that is to be written
5326 * @cnt : Number of bytes of the data that are actually to be written into
5327 * the Eeprom. (max of 3)
5328 * Description:
5329 * Actually writes the relevant part of the data value into the Eeprom
5330 * through the I2C bus.
5331 * Return value:
5332 * 0 on success, -1 on failure.
5333 */
5334
5335static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5336{
5337 int exit_cnt = 0, ret = -1;
5338 u64 val64;
5339 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5340
5341 if (sp->device_type == XFRAME_I_DEVICE) {
5342 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5343 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5344 I2C_CONTROL_CNTL_START;
5345 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5346
5347 while (exit_cnt < 5) {
5348 val64 = readq(&bar0->i2c_control);
5349 if (I2C_CONTROL_CNTL_END(val64)) {
5350 if (!(val64 & I2C_CONTROL_NACK))
5351 ret = 0;
5352 break;
5353 }
5354 msleep(50);
5355 exit_cnt++;
5356 }
5357 }
5358
5359 if (sp->device_type == XFRAME_II_DEVICE) {
5360 int write_cnt = (cnt == 8) ? 0 : cnt;
5361 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5362
5363 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5364 SPI_CONTROL_BYTECNT(write_cnt) |
5365 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5366 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5367 val64 |= SPI_CONTROL_REQ;
5368 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5369 while (exit_cnt < 5) {
5370 val64 = readq(&bar0->spi_control);
5371 if (val64 & SPI_CONTROL_NACK) {
5372 ret = 1;
5373 break;
5374 } else if (val64 & SPI_CONTROL_DONE) {
5375 ret = 0;
5376 break;
5377 }
5378 msleep(50);
5379 exit_cnt++;
5380 }
5381 }
5382 return ret;
5383}
5384static void s2io_vpd_read(struct s2io_nic *nic)
5385{
5386 u8 *vpd_data;
5387 u8 data;
5388 int i=0, cnt, fail = 0;
5389 int vpd_addr = 0x80;
5390
5391 if (nic->device_type == XFRAME_II_DEVICE) {
5392 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5393 vpd_addr = 0x80;
5394 }
5395 else {
5396 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5397 vpd_addr = 0x50;
5398 }
5399 strcpy(nic->serial_num, "NOT AVAILABLE");
5400
5401 vpd_data = kmalloc(256, GFP_KERNEL);
5402 if (!vpd_data) {
5403 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5404 return;
5405 }
5406 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5407
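	/*
	 * Read the VPD area through the PCI VPD capability: write the
	 * word address at vpd_addr + 2 with the flag byte cleared, poll
	 * the flag byte at vpd_addr + 3 until the adapter sets it to
	 * 0x80, then read 4 bytes of data from vpd_addr + 4.
	 */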
5408 for (i = 0; i < 256; i +=4 ) {
5409 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5410 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5411 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5412 for (cnt = 0; cnt <5; cnt++) {
5413 msleep(2);
5414 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5415 if (data == 0x80)
5416 break;
5417 }
5418 if (cnt >= 5) {
5419 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5420 fail = 1;
5421 break;
5422 }
5423 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5424 (u32 *)&vpd_data[i]);
5425 }
5426
5427 if(!fail) {
5428 /* read serial number of adapter */
5429 for (cnt = 0; cnt < 256; cnt++) {
5430 if ((vpd_data[cnt] == 'S') &&
5431 (vpd_data[cnt+1] == 'N') &&
5432 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5433 memset(nic->serial_num, 0, VPD_STRING_LEN);
5434 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5435 vpd_data[cnt+2]);
5436 break;
5437 }
5438 }
5439 }
5440
5441 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5442 memset(nic->product_name, 0, vpd_data[1]);
5443 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5444 }
5445 kfree(vpd_data);
5446 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5447}
5448
5449/**
5450 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5451 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5452 * @eeprom : pointer to the user level structure provided by ethtool,
5453 * containing all relevant information.
5454 * @data_buf : user defined value to be written into Eeprom.
5455 * Description: Reads the values stored in the Eeprom at given offset
5456 * for a given length. Stores these values in the input argument data
5457 * buffer 'data_buf' and returns them to the caller (ethtool).
5458 * Return value:
5459 * int 0 on success
5460 */
5461
5462static int s2io_ethtool_geeprom(struct net_device *dev,
5463 struct ethtool_eeprom *eeprom, u8 * data_buf)
5464{
5465 u32 i, valid;
5466 u64 data;
5467 struct s2io_nic *sp = dev->priv;
5468
5469 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5470
5471 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5472 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5473
5474 for (i = 0; i < eeprom->len; i += 4) {
5475 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5476 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5477 return -EFAULT;
5478 }
5479 valid = INV(data);
5480 memcpy((data_buf + i), &valid, 4);
5481 }
5482 return 0;
5483}
5484
5485/**
5486 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5487 * @sp : private member of the device structure, which is a pointer to the
5488 * s2io_nic structure.
5489 * @eeprom : pointer to the user level structure provided by ethtool,
5490 * containing all relevant information.
5491 * @data_buf : user defined value to be written into Eeprom.
5492 * Description:
5493 * Tries to write the user provided value in the Eeprom, at the offset
5494 * given by the user.
5495 * Return value:
5496 * 0 on success, -EFAULT on failure.
5497 */
5498
5499static int s2io_ethtool_seeprom(struct net_device *dev,
5500 struct ethtool_eeprom *eeprom,
5501 u8 * data_buf)
5502{
5503 int len = eeprom->len, cnt = 0;
5504 u64 valid = 0, data;
5505 struct s2io_nic *sp = dev->priv;
5506
5507 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5508 DBG_PRINT(ERR_DBG,
5509 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5510		DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
5511 eeprom->magic);
5512 return -EFAULT;
5513 }
5514
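	/*
	 * Write the buffer out one byte at a time; each non-zero byte
	 * is shifted into bits 31:24 of the word that is handed down to
	 * write_eeprom().
	 */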
5515 while (len) {
5516 data = (u32) data_buf[cnt] & 0x000000FF;
5517 if (data) {
5518 valid = (u32) (data << 24);
5519 } else
5520 valid = data;
5521
5522 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5523 DBG_PRINT(ERR_DBG,
5524 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5525 DBG_PRINT(ERR_DBG,
5526 "write into the specified offset\n");
5527 return -EFAULT;
5528 }
5529 cnt++;
5530 len--;
5531 }
5532
5533 return 0;
5534}
5535
5536/**
5537 * s2io_register_test - reads and writes into all clock domains.
5538 * @sp : private member of the device structure, which is a pointer to the
5539 * s2io_nic structure.
5540 * @data : variable that returns the result of each of the tests
5541 * conducted by the driver.
5542 * Description:
5543 * Read and write into all clock domains. The NIC has 3 clock domains;
5544 * the test verifies that registers in all three regions are accessible.
5545 * Return value:
5546 * 0 on success.
5547 */
5548
5549static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5550{
5551 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5552 u64 val64 = 0, exp_val;
5553 int fail = 0;
5554
5555 val64 = readq(&bar0->pif_rd_swapper_fb);
5556 if (val64 != 0x123456789abcdefULL) {
5557 fail = 1;
5558 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5559 }
5560
5561 val64 = readq(&bar0->rmac_pause_cfg);
5562 if (val64 != 0xc000ffff00000000ULL) {
5563 fail = 1;
5564 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5565 }
5566
5567 val64 = readq(&bar0->rx_queue_cfg);
5568 if (sp->device_type == XFRAME_II_DEVICE)
5569 exp_val = 0x0404040404040404ULL;
5570 else
5571 exp_val = 0x0808080808080808ULL;
5572 if (val64 != exp_val) {
5573 fail = 1;
5574 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5575 }
5576
5577 val64 = readq(&bar0->xgxs_efifo_cfg);
5578 if (val64 != 0x000000001923141EULL) {
5579 fail = 1;
5580 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5581 }
5582
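	/*
	 * Write/read-back test: xmsi_data is used as a scratch register
	 * and checked with the alternating 0x5A/0xA5 bit patterns.
	 */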
5583 val64 = 0x5A5A5A5A5A5A5A5AULL;
5584 writeq(val64, &bar0->xmsi_data);
5585 val64 = readq(&bar0->xmsi_data);
5586 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5587 fail = 1;
5588 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5589 }
5590
5591 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5592 writeq(val64, &bar0->xmsi_data);
5593 val64 = readq(&bar0->xmsi_data);
5594 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5595 fail = 1;
5596 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5597 }
5598
5599 *data = fail;
5600 return fail;
5601}
5602
5603/**
5604 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5605 * @sp : private member of the device structure, which is a pointer to the
5606 * s2io_nic structure.
5607 * @data : variable that returns the result of each of the tests conducted by
5608 * the driver.
5609 * Description:
5610 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5611 * register.
5612 * Return value:
5613 * 0 on success.
5614 */
5615
5616static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5617{
5618 int fail = 0;
5619 u64 ret_data, org_4F0, org_7F0;
5620 u8 saved_4F0 = 0, saved_7F0 = 0;
5621 struct net_device *dev = sp->dev;
5622
5623 /* Test Write Error at offset 0 */
5624 /* Note that SPI interface allows write access to all areas
5625 * of EEPROM. Hence doing all negative testing only for Xframe I.
5626 */
5627 if (sp->device_type == XFRAME_I_DEVICE)
5628 if (!write_eeprom(sp, 0, 0, 3))
5629 fail = 1;
5630
5631 /* Save current values at offsets 0x4F0 and 0x7F0 */
5632 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5633 saved_4F0 = 1;
5634 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5635 saved_7F0 = 1;
5636
5637 /* Test Write at offset 4f0 */
5638 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5639 fail = 1;
5640 if (read_eeprom(sp, 0x4F0, &ret_data))
5641 fail = 1;
5642
5643 if (ret_data != 0x012345) {
5644 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5645 "Data written %llx Data read %llx\n",
5646 dev->name, (unsigned long long)0x12345,
5647 (unsigned long long)ret_data);
5648 fail = 1;
5649 }
5650
5651	/* Reset the EEPROM data to 0xFFFFFF */
5652 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5653
5654 /* Test Write Request Error at offset 0x7c */
5655 if (sp->device_type == XFRAME_I_DEVICE)
5656 if (!write_eeprom(sp, 0x07C, 0, 3))
5657 fail = 1;
5658
5659 /* Test Write Request at offset 0x7f0 */
5660 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5661 fail = 1;
5662 if (read_eeprom(sp, 0x7F0, &ret_data))
5663 fail = 1;
5664
5665 if (ret_data != 0x012345) {
5666 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5667 "Data written %llx Data read %llx\n",
5668 dev->name, (unsigned long long)0x12345,
5669 (unsigned long long)ret_data);
5670 fail = 1;
5671 }
5672
5673	/* Reset the EEPROM data to 0xFFFFFF */
5674 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5675
5676 if (sp->device_type == XFRAME_I_DEVICE) {
5677 /* Test Write Error at offset 0x80 */
5678 if (!write_eeprom(sp, 0x080, 0, 3))
5679 fail = 1;
5680
5681 /* Test Write Error at offset 0xfc */
5682 if (!write_eeprom(sp, 0x0FC, 0, 3))
5683 fail = 1;
5684
5685 /* Test Write Error at offset 0x100 */
5686 if (!write_eeprom(sp, 0x100, 0, 3))
5687 fail = 1;
5688
5689 /* Test Write Error at offset 4ec */
5690 if (!write_eeprom(sp, 0x4EC, 0, 3))
5691 fail = 1;
5692 }
5693
5694 /* Restore values at offsets 0x4F0 and 0x7F0 */
5695 if (saved_4F0)
5696 write_eeprom(sp, 0x4F0, org_4F0, 3);
5697 if (saved_7F0)
5698 write_eeprom(sp, 0x7F0, org_7F0, 3);
5699
5700 *data = fail;
5701 return fail;
5702}
5703
5704/**
5705 * s2io_bist_test - invokes the MemBist test of the card .
5706 * @sp : private member of the device structure, which is a pointer to the
5707 * s2io_nic structure.
5708 * @data : variable that returns the result of each of the tests conducted by
5709 * the driver.
5710 * Description:
5711 * This invokes the MemBist test of the card. We give around
5712 * 2 seconds for the test to complete. If it is still not complete
5713 * within this period, we consider that the test failed.
5714 * Return value:
5715 * 0 on success and -1 on failure.
5716 */
5717
5718static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5719{
5720 u8 bist = 0;
5721 int cnt = 0, ret = -1;
5722
5723 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5724 bist |= PCI_BIST_START;
5725	pci_write_config_byte(sp->pdev, PCI_BIST, bist); /* PCI_BIST is a byte register at an odd offset */
5726
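	/* Poll for up to 2 seconds (20 x 100 msec) for BIST to complete */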
5727 while (cnt < 20) {
5728 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5729 if (!(bist & PCI_BIST_START)) {
5730 *data = (bist & PCI_BIST_CODE_MASK);
5731 ret = 0;
5732 break;
5733 }
5734 msleep(100);
5735 cnt++;
5736 }
5737
5738 return ret;
5739}
5740
5741/**
5742 * s2io_link_test - verifies the link state of the nic
5743 * @sp : private member of the device structure, which is a pointer to the
5744 * s2io_nic structure.
5745 * @data : variable that returns the result of each of the tests conducted by
5746 * the driver.
5747 * Description:
5748 * The function verifies the link state of the NIC and updates the input
5749 * argument 'data' appropriately.
5750 * Return value:
5751 * 0 on success.
5752 */
5753
5754static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5755{
5756 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5757 u64 val64;
5758
5759 val64 = readq(&bar0->adapter_status);
5760 if(!(LINK_IS_UP(val64)))
5761 *data = 1;
5762 else
5763 *data = 0;
5764
5765 return *data;
5766}
5767
5768/**
5769 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5770 * @sp : private member of the device structure, which is a pointer to the
5771 * s2io_nic structure.
5772 * @data : variable that returns the result of each of the tests
5773 * conducted by the driver.
5774 * Description:
5775 * This is one of the offline tests; it checks read and write
5776 * access to the RldRam chip on the NIC.
5777 * Return value:
5778 * 0 on success.
5779 */
5780
5781static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5782{
5783 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5784 u64 val64;
5785 int cnt, iteration = 0, test_fail = 0;
5786
5787 val64 = readq(&bar0->adapter_control);
5788 val64 &= ~ADAPTER_ECC_EN;
5789 writeq(val64, &bar0->adapter_control);
5790
5791 val64 = readq(&bar0->mc_rldram_test_ctrl);
5792 val64 |= MC_RLDRAM_TEST_MODE;
5793 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5794
5795 val64 = readq(&bar0->mc_rldram_mrs);
5796 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5797 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5798
5799 val64 |= MC_RLDRAM_MRS_ENABLE;
5800 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5801
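	/*
	 * Two passes over the test: write three data patterns and read
	 * them back through the RLDRAM test logic; the second pass
	 * repeats the check with the upper 48 bits of each pattern
	 * inverted.
	 */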
5802 while (iteration < 2) {
5803 val64 = 0x55555555aaaa0000ULL;
5804 if (iteration == 1) {
5805 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5806 }
5807 writeq(val64, &bar0->mc_rldram_test_d0);
5808
5809 val64 = 0xaaaa5a5555550000ULL;
5810 if (iteration == 1) {
5811 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5812 }
5813 writeq(val64, &bar0->mc_rldram_test_d1);
5814
5815 val64 = 0x55aaaaaaaa5a0000ULL;
5816 if (iteration == 1) {
5817 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5818 }
5819 writeq(val64, &bar0->mc_rldram_test_d2);
5820
5821 val64 = (u64) (0x0000003ffffe0100ULL);
5822 writeq(val64, &bar0->mc_rldram_test_add);
5823
5824 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5825 MC_RLDRAM_TEST_GO;
5826 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5827
5828 for (cnt = 0; cnt < 5; cnt++) {
5829 val64 = readq(&bar0->mc_rldram_test_ctrl);
5830 if (val64 & MC_RLDRAM_TEST_DONE)
5831 break;
5832 msleep(200);
5833 }
5834
5835 if (cnt == 5)
5836 break;
5837
5838 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5839 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5840
5841 for (cnt = 0; cnt < 5; cnt++) {
5842 val64 = readq(&bar0->mc_rldram_test_ctrl);
5843 if (val64 & MC_RLDRAM_TEST_DONE)
5844 break;
5845 msleep(500);
5846 }
5847
5848 if (cnt == 5)
5849 break;
5850
5851 val64 = readq(&bar0->mc_rldram_test_ctrl);
5852 if (!(val64 & MC_RLDRAM_TEST_PASS))
5853 test_fail = 1;
5854
5855 iteration++;
5856 }
5857
5858 *data = test_fail;
5859
5860 /* Bring the adapter out of test mode */
5861 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5862
5863 return test_fail;
5864}
5865
5866/**
5867 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
5868 * @sp : private member of the device structure, which is a pointer to the
5869 * s2io_nic structure.
5870 * @ethtest : pointer to a ethtool command specific structure that will be
5871 * returned to the user.
5872 * @data : variable that returns the result of each of the tests
5873 * conducted by the driver.
5874 * Description:
5875 * This function conducts 5 tests (4 offline and 1 online) to determine
5876 * the health of the card.
5877 * Return value:
5878 * void
5879 */
5880
5881static void s2io_ethtool_test(struct net_device *dev,
5882 struct ethtool_test *ethtest,
5883 uint64_t * data)
5884{
5885 struct s2io_nic *sp = dev->priv;
5886 int orig_state = netif_running(sp->dev);
5887
5888 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5889 /* Offline Tests. */
5890 if (orig_state)
5891 s2io_close(sp->dev);
5892
5893 if (s2io_register_test(sp, &data[0]))
5894 ethtest->flags |= ETH_TEST_FL_FAILED;
5895
5896 s2io_reset(sp);
5897
5898 if (s2io_rldram_test(sp, &data[3]))
5899 ethtest->flags |= ETH_TEST_FL_FAILED;
5900
5901 s2io_reset(sp);
5902
5903 if (s2io_eeprom_test(sp, &data[1]))
5904 ethtest->flags |= ETH_TEST_FL_FAILED;
5905
5906 if (s2io_bist_test(sp, &data[4]))
5907 ethtest->flags |= ETH_TEST_FL_FAILED;
5908
5909 if (orig_state)
5910 s2io_open(sp->dev);
5911
5912 data[2] = 0;
5913 } else {
5914 /* Online Tests. */
5915 if (!orig_state) {
5916 DBG_PRINT(ERR_DBG,
5917 "%s: is not up, cannot run test\n",
5918 dev->name);
5919 data[0] = -1;
5920 data[1] = -1;
5921 data[2] = -1;
5922 data[3] = -1;
5923 data[4] = -1;
5924 }
5925
5926 if (s2io_link_test(sp, &data[2]))
5927 ethtest->flags |= ETH_TEST_FL_FAILED;
5928
5929 data[0] = 0;
5930 data[1] = 0;
5931 data[3] = 0;
5932 data[4] = 0;
5933 }
5934}
5935
5936static void s2io_get_ethtool_stats(struct net_device *dev,
5937 struct ethtool_stats *estats,
5938 u64 * tmp_stats)
5939{
5940 int i = 0, k;
5941 struct s2io_nic *sp = dev->priv;
5942 struct stat_block *stat_info = sp->mac_control.stats_info;
5943
5944 s2io_updt_stats(sp);
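	/*
	 * The hardware keeps most MAC counters as a 32-bit value plus a
	 * 32-bit overflow word; fold each pair into a single 64-bit
	 * statistic before handing it to ethtool.
	 */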
5945 tmp_stats[i++] =
5946 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5947 le32_to_cpu(stat_info->tmac_frms);
5948 tmp_stats[i++] =
5949 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5950 le32_to_cpu(stat_info->tmac_data_octets);
5951 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5952 tmp_stats[i++] =
5953 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5954 le32_to_cpu(stat_info->tmac_mcst_frms);
5955 tmp_stats[i++] =
5956 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5957 le32_to_cpu(stat_info->tmac_bcst_frms);
5958 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5959 tmp_stats[i++] =
5960 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5961 le32_to_cpu(stat_info->tmac_ttl_octets);
5962 tmp_stats[i++] =
5963 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5964 le32_to_cpu(stat_info->tmac_ucst_frms);
5965 tmp_stats[i++] =
5966 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5967 le32_to_cpu(stat_info->tmac_nucst_frms);
5968 tmp_stats[i++] =
5969 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5970 le32_to_cpu(stat_info->tmac_any_err_frms);
5971 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5972 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5973 tmp_stats[i++] =
5974 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5975 le32_to_cpu(stat_info->tmac_vld_ip);
5976 tmp_stats[i++] =
5977 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5978 le32_to_cpu(stat_info->tmac_drop_ip);
5979 tmp_stats[i++] =
5980 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5981 le32_to_cpu(stat_info->tmac_icmp);
5982 tmp_stats[i++] =
5983 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5984 le32_to_cpu(stat_info->tmac_rst_tcp);
5985 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5986 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5987 le32_to_cpu(stat_info->tmac_udp);
5988 tmp_stats[i++] =
5989 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5990 le32_to_cpu(stat_info->rmac_vld_frms);
5991 tmp_stats[i++] =
5992 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5993 le32_to_cpu(stat_info->rmac_data_octets);
5994 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5995 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5996 tmp_stats[i++] =
5997 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5998 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5999 tmp_stats[i++] =
6000 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6001 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6002 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6003 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6004 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6005 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6006 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6007 tmp_stats[i++] =
6008 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6009 le32_to_cpu(stat_info->rmac_ttl_octets);
6010 tmp_stats[i++] =
6011 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6012 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6013 tmp_stats[i++] =
6014 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6015 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6016 tmp_stats[i++] =
6017 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6018 le32_to_cpu(stat_info->rmac_discarded_frms);
6019 tmp_stats[i++] =
6020 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6021 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6022 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6023 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6024 tmp_stats[i++] =
6025 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6026 le32_to_cpu(stat_info->rmac_usized_frms);
6027 tmp_stats[i++] =
6028 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6029 le32_to_cpu(stat_info->rmac_osized_frms);
6030 tmp_stats[i++] =
6031 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6032 le32_to_cpu(stat_info->rmac_frag_frms);
6033 tmp_stats[i++] =
6034 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6035 le32_to_cpu(stat_info->rmac_jabber_frms);
6036 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6037 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6038 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6039 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6040 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6041 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6042 tmp_stats[i++] =
6043 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6044 le32_to_cpu(stat_info->rmac_ip);
6045 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6046 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6047 tmp_stats[i++] =
6048 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6049 le32_to_cpu(stat_info->rmac_drop_ip);
6050 tmp_stats[i++] =
6051 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6052 le32_to_cpu(stat_info->rmac_icmp);
6053 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6054 tmp_stats[i++] =
6055 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6056 le32_to_cpu(stat_info->rmac_udp);
6057 tmp_stats[i++] =
6058 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6059 le32_to_cpu(stat_info->rmac_err_drp_udp);
6060 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6061 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6062 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6063 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6064 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6065 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6066 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6067 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6068 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6069 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6070 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6071 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6072 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6073 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6074 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6075 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6076 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6077 tmp_stats[i++] =
6078 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6079 le32_to_cpu(stat_info->rmac_pause_cnt);
6080 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6081 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6082 tmp_stats[i++] =
6083 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6084 le32_to_cpu(stat_info->rmac_accepted_ip);
6085 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6086 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6087 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6088 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6089 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6090 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6091 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6092 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6093 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6094 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6095 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6096 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6097 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6098 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6099 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6100 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6101 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6102 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6103 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6104
6105 /* Enhanced statistics exist only for Hercules */
6106 if(sp->device_type == XFRAME_II_DEVICE) {
6107 tmp_stats[i++] =
6108 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6109 tmp_stats[i++] =
6110 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6111 tmp_stats[i++] =
6112 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6113 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6114 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6115 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6116 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6117 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6118 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6119 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6120 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6121 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6122 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6123 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6124 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6125 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6126 }
6127
6128 tmp_stats[i++] = 0;
6129 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6130 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6131 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6132 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6133 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6134 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6135 for (k = 0; k < MAX_RX_RINGS; k++)
6136 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6137 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6138 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6139 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6140 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6141 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6142 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6143 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6144 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6145 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6146 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6147 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6148 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6149 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6150 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6151 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6152 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6153 if (stat_info->sw_stat.num_aggregations) {
6154 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6155 int count = 0;
6156 /*
6157 * Since 64-bit divide does not work on all platforms,
6158 * do repeated subtraction.
6159 */
6160 while (tmp >= stat_info->sw_stat.num_aggregations) {
6161 tmp -= stat_info->sw_stat.num_aggregations;
6162 count++;
6163 }
6164 tmp_stats[i++] = count;
6165 }
6166 else
6167 tmp_stats[i++] = 0;
6168 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6169 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6170 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6171 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6172 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6173 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6174 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6175 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6176 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6177
6178 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6179 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6180 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6181 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6182 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6183
6184 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6185 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6186 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6187 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6188 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6189 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6190 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6191 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6192 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6193 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6194 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6195 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6196 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6197 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6198 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6199 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6200 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6201 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6202 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6203 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6204 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6205 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6206 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6207 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6208 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6209 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6210}
6211
6212static int s2io_ethtool_get_regs_len(struct net_device *dev)
6213{
6214 return (XENA_REG_SPACE);
6215}
6216
6217
6218static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6219{
6220 struct s2io_nic *sp = dev->priv;
6221
6222 return (sp->rx_csum);
6223}
6224
6225static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6226{
6227 struct s2io_nic *sp = dev->priv;
6228
6229 if (data)
6230 sp->rx_csum = 1;
6231 else
6232 sp->rx_csum = 0;
6233
6234 return 0;
6235}
6236
6237static int s2io_get_eeprom_len(struct net_device *dev)
6238{
6239 return (XENA_EEPROM_SPACE);
6240}
6241
6242static int s2io_ethtool_self_test_count(struct net_device *dev)
6243{
6244 return (S2IO_TEST_LEN);
6245}
6246
6247static void s2io_ethtool_get_strings(struct net_device *dev,
6248 u32 stringset, u8 * data)
6249{
6250 int stat_size = 0;
6251 struct s2io_nic *sp = dev->priv;
6252
6253 switch (stringset) {
6254 case ETH_SS_TEST:
6255 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6256 break;
6257 case ETH_SS_STATS:
6258 stat_size = sizeof(ethtool_xena_stats_keys);
6259 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6260 if(sp->device_type == XFRAME_II_DEVICE) {
6261 memcpy(data + stat_size,
6262 &ethtool_enhanced_stats_keys,
6263 sizeof(ethtool_enhanced_stats_keys));
6264 stat_size += sizeof(ethtool_enhanced_stats_keys);
6265 }
6266
6267 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6268 sizeof(ethtool_driver_stats_keys));
6269 }
6270}
6271static int s2io_ethtool_get_stats_count(struct net_device *dev)
6272{
6273 struct s2io_nic *sp = dev->priv;
6274 int stat_count = 0;
6275 switch(sp->device_type) {
6276 case XFRAME_I_DEVICE:
6277 stat_count = XFRAME_I_STAT_LEN;
6278 break;
6279
6280 case XFRAME_II_DEVICE:
6281 stat_count = XFRAME_II_STAT_LEN;
6282 break;
6283 }
6284
6285 return stat_count;
6286}
6287
6288static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6289{
6290 if (data)
6291 dev->features |= NETIF_F_IP_CSUM;
6292 else
6293 dev->features &= ~NETIF_F_IP_CSUM;
6294
6295 return 0;
6296}
6297
6298static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6299{
6300 return (dev->features & NETIF_F_TSO) != 0;
6301}
6302static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6303{
6304 if (data)
6305 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6306 else
6307 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6308
6309 return 0;
6310}
6311
6312static const struct ethtool_ops netdev_ethtool_ops = {
6313 .get_settings = s2io_ethtool_gset,
6314 .set_settings = s2io_ethtool_sset,
6315 .get_drvinfo = s2io_ethtool_gdrvinfo,
6316 .get_regs_len = s2io_ethtool_get_regs_len,
6317 .get_regs = s2io_ethtool_gregs,
6318 .get_link = ethtool_op_get_link,
6319 .get_eeprom_len = s2io_get_eeprom_len,
6320 .get_eeprom = s2io_ethtool_geeprom,
6321 .set_eeprom = s2io_ethtool_seeprom,
6322 .get_ringparam = s2io_ethtool_gringparam,
6323 .get_pauseparam = s2io_ethtool_getpause_data,
6324 .set_pauseparam = s2io_ethtool_setpause_data,
6325 .get_rx_csum = s2io_ethtool_get_rx_csum,
6326 .set_rx_csum = s2io_ethtool_set_rx_csum,
6327 .get_tx_csum = ethtool_op_get_tx_csum,
6328 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6329 .get_sg = ethtool_op_get_sg,
6330 .set_sg = ethtool_op_set_sg,
6331 .get_tso = s2io_ethtool_op_get_tso,
6332 .set_tso = s2io_ethtool_op_set_tso,
6333 .get_ufo = ethtool_op_get_ufo,
6334 .set_ufo = ethtool_op_set_ufo,
6335 .self_test_count = s2io_ethtool_self_test_count,
6336 .self_test = s2io_ethtool_test,
6337 .get_strings = s2io_ethtool_get_strings,
6338 .phys_id = s2io_ethtool_idnic,
6339 .get_stats_count = s2io_ethtool_get_stats_count,
6340 .get_ethtool_stats = s2io_get_ethtool_stats
6341};
6342
6343/**
6344 * s2io_ioctl - Entry point for the Ioctl
6345 * @dev : Device pointer.
6346 * @rq : An IOCTL specific structure, that can contain a pointer to
6347 * a proprietary structure used to pass information to the driver.
6348 * @cmd : This is used to distinguish between the different commands that
6349 * can be passed to the IOCTL functions.
6350 * Description:
6351 * Currently there is no special functionality supported in IOCTL, hence
6352 * the function always returns -EOPNOTSUPP.
6353 */
6354
6355static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6356{
6357 return -EOPNOTSUPP;
6358}
6359
6360/**
6361 * s2io_change_mtu - entry point to change MTU size for the device.
6362 * @dev : device pointer.
6363 * @new_mtu : the new MTU size for the device.
6364 * Description: A driver entry point to change MTU size for the device.
6365 * Before changing the MTU the device must be stopped.
6366 * Return value:
6367 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6368 * file on failure.
6369 */
6370
6371static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6372{
6373 struct s2io_nic *sp = dev->priv;
6374
6375 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6376 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6377 dev->name);
6378 return -EPERM;
6379 }
6380
6381 dev->mtu = new_mtu;
6382 if (netif_running(dev)) {
6383 s2io_card_down(sp);
6384 netif_stop_queue(dev);
6385 if (s2io_card_up(sp)) {
6386 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6387 __FUNCTION__);
6388 }
6389 if (netif_queue_stopped(dev))
6390 netif_wake_queue(dev);
6391 } else { /* Device is down */
6392 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6393 u64 val64 = new_mtu;
6394
6395 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6396 }
6397
6398 return 0;
6399}
6400
6401/**
6402 * s2io_tasklet - Bottom half of the ISR.
6403 * @dev_addr : address of the net device structure, passed as an unsigned long.
6404 * Description:
6405 * This is the tasklet, i.e. the bottom half of the ISR. It is an
6406 * extension of the ISR that is scheduled to run in softirq context
6407 * once the ISR returns. All low priority tasks of the ISR can
6408 * be pushed into the tasklet. For now the tasklet is used only to
6409 * replenish the Rx buffers in the Rx buffer descriptors.
6410 * Return value:
6411 * void.
6412 */
6413
6414static void s2io_tasklet(unsigned long dev_addr)
6415{
6416 struct net_device *dev = (struct net_device *) dev_addr;
6417 struct s2io_nic *sp = dev->priv;
6418 int i, ret;
6419 struct mac_info *mac_control;
6420 struct config_param *config;
6421
6422 mac_control = &sp->mac_control;
6423 config = &sp->config;
6424
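	/*
	 * TASKLET_IN_USE presumably performs a test-and-set on bit 0 of
	 * sp->tasklet_status (note the matching clear_bit() below), so a
	 * second invocation of the tasklet skips the refill.
	 */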
6425 if (!TASKLET_IN_USE) {
6426 for (i = 0; i < config->rx_ring_num; i++) {
6427 ret = fill_rx_buffers(sp, i);
6428 if (ret == -ENOMEM) {
6429 DBG_PRINT(INFO_DBG, "%s: Out of ",
6430 dev->name);
6431 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6432 break;
6433 } else if (ret == -EFILL) {
6434 DBG_PRINT(INFO_DBG,
6435 "%s: Rx Ring %d is full\n",
6436 dev->name, i);
6437 break;
6438 }
6439 }
6440 clear_bit(0, (&sp->tasklet_status));
6441 }
6442}
6443
6444/**
6445 * s2io_set_link - Set the link status
6446 * @work : pointer to the work_struct embedded in the s2io_nic structure
6447 * Description: Sets the link status for the adapter
6448 */
6449
6450static void s2io_set_link(struct work_struct *work)
6451{
6452 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6453 struct net_device *dev = nic->dev;
6454 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6455 register u64 val64;
6456 u16 subid;
6457
6458 rtnl_lock();
6459
6460 if (!netif_running(dev))
6461 goto out_unlock;
6462
6463 if (test_and_set_bit(0, &(nic->link_state))) {
6464 /* The card is being reset, no point doing anything */
6465 goto out_unlock;
6466 }
6467
6468 subid = nic->pdev->subsystem_device;
6469 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6470 /*
6471		 * Allow a small delay for the NIC's self-initiated
6472 * cleanup to complete.
6473 */
6474 msleep(100);
6475 }
6476
6477 val64 = readq(&bar0->adapter_status);
6478 if (LINK_IS_UP(val64)) {
6479 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6480 if (verify_xena_quiescence(nic)) {
6481 val64 = readq(&bar0->adapter_control);
6482 val64 |= ADAPTER_CNTL_EN;
6483 writeq(val64, &bar0->adapter_control);
6484 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6485 nic->device_type, subid)) {
6486 val64 = readq(&bar0->gpio_control);
6487 val64 |= GPIO_CTRL_GPIO_0;
6488 writeq(val64, &bar0->gpio_control);
6489 val64 = readq(&bar0->gpio_control);
6490 } else {
6491 val64 |= ADAPTER_LED_ON;
6492 writeq(val64, &bar0->adapter_control);
6493 }
6494 nic->device_enabled_once = TRUE;
6495 } else {
6496 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6497 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6498 netif_stop_queue(dev);
6499 }
6500 }
6501 val64 = readq(&bar0->adapter_control);
6502 val64 |= ADAPTER_LED_ON;
6503 writeq(val64, &bar0->adapter_control);
6504 s2io_link(nic, LINK_UP);
6505 } else {
6506 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6507 subid)) {
6508 val64 = readq(&bar0->gpio_control);
6509 val64 &= ~GPIO_CTRL_GPIO_0;
6510 writeq(val64, &bar0->gpio_control);
6511 val64 = readq(&bar0->gpio_control);
6512 }
6513 /* turn off LED */
6514 val64 = readq(&bar0->adapter_control);
6515 val64 = val64 &(~ADAPTER_LED_ON);
6516 writeq(val64, &bar0->adapter_control);
6517 s2io_link(nic, LINK_DOWN);
6518 }
6519 clear_bit(0, &(nic->link_state));
6520
6521out_unlock:
6522 rtnl_unlock();
6523}
6524
6525static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6526 struct buffAdd *ba,
6527 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6528 u64 *temp2, int size)
6529{
6530 struct net_device *dev = sp->dev;
6531 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6532
6533 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6534 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6535 /* allocate skb */
6536 if (*skb) {
6537 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6538			/*
6539			 * As Rx frames are not going to be processed,
6540			 * reuse the same mapped address for the RxD
6541			 * buffer pointer.
6542			 */
6543 rxdp1->Buffer0_ptr = *temp0;
6544 } else {
6545 *skb = dev_alloc_skb(size);
6546 if (!(*skb)) {
6547 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6548 DBG_PRINT(INFO_DBG, "memory to allocate ");
6549 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6550 sp->mac_control.stats_info->sw_stat. \
6551 mem_alloc_fail_cnt++;
6552 return -ENOMEM ;
6553 }
6554 sp->mac_control.stats_info->sw_stat.mem_allocated
6555 += (*skb)->truesize;
6556			/* storing the mapped addr in a temp variable
6557			 * so that it can be used for the next rxd whose
6558			 * Host_Control is NULL
6559			 */
6560 rxdp1->Buffer0_ptr = *temp0 =
6561 pci_map_single( sp->pdev, (*skb)->data,
6562 size - NET_IP_ALIGN,
6563 PCI_DMA_FROMDEVICE);
6564 if( (rxdp1->Buffer0_ptr == 0) ||
6565 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6566 goto memalloc_failed;
6567 }
6568 rxdp->Host_Control = (unsigned long) (*skb);
6569 }
6570 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6571 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6572 /* Two buffer Mode */
6573 if (*skb) {
6574 rxdp3->Buffer2_ptr = *temp2;
6575 rxdp3->Buffer0_ptr = *temp0;
6576 rxdp3->Buffer1_ptr = *temp1;
6577 } else {
6578 *skb = dev_alloc_skb(size);
6579 if (!(*skb)) {
6580 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6581 DBG_PRINT(INFO_DBG, "memory to allocate ");
6582 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6583 sp->mac_control.stats_info->sw_stat. \
6584 mem_alloc_fail_cnt++;
6585 return -ENOMEM;
6586 }
6587 sp->mac_control.stats_info->sw_stat.mem_allocated
6588 += (*skb)->truesize;
6589 rxdp3->Buffer2_ptr = *temp2 =
6590 pci_map_single(sp->pdev, (*skb)->data,
6591 dev->mtu + 4,
6592 PCI_DMA_FROMDEVICE);
6593 if( (rxdp3->Buffer2_ptr == 0) ||
6594 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6595 goto memalloc_failed;
6596 }
6597 rxdp3->Buffer0_ptr = *temp0 =
6598 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6599 PCI_DMA_FROMDEVICE);
6600 if( (rxdp3->Buffer0_ptr == 0) ||
6601 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6602 pci_unmap_single (sp->pdev,
6603 (dma_addr_t)rxdp3->Buffer2_ptr,
6604 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6605 goto memalloc_failed;
6606 }
6607 rxdp->Host_Control = (unsigned long) (*skb);
6608
6609			/* Buffer-1 is a dummy buffer, not used */
6610 rxdp3->Buffer1_ptr = *temp1 =
6611 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6612 PCI_DMA_FROMDEVICE);
6613 if( (rxdp3->Buffer1_ptr == 0) ||
6614 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6615 pci_unmap_single (sp->pdev,
6616 (dma_addr_t)rxdp3->Buffer0_ptr,
6617 BUF0_LEN, PCI_DMA_FROMDEVICE);
6618 pci_unmap_single (sp->pdev,
6619 (dma_addr_t)rxdp3->Buffer2_ptr,
6620 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6621 goto memalloc_failed;
6622 }
6623 }
6624 }
6625 return 0;
6626 memalloc_failed:
6627 stats->pci_map_fail_cnt++;
6628 stats->mem_freed += (*skb)->truesize;
6629 dev_kfree_skb(*skb);
6630 return -ENOMEM;
6631}
6632
6633static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6634 int size)
6635{
6636 struct net_device *dev = sp->dev;
6637 if (sp->rxd_mode == RXD_MODE_1) {
6638 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6639 } else if (sp->rxd_mode == RXD_MODE_3B) {
6640 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6641 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6642 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6643 }
6644}
6645
6646static int rxd_owner_bit_reset(struct s2io_nic *sp)
6647{
6648 int i, j, k, blk_cnt = 0, size;
6649 struct mac_info * mac_control = &sp->mac_control;
6650 struct config_param *config = &sp->config;
6651 struct net_device *dev = sp->dev;
6652 struct RxD_t *rxdp = NULL;
6653 struct sk_buff *skb = NULL;
6654 struct buffAdd *ba = NULL;
6655 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6656
6657 /* Calculate the size based on ring mode */
6658 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6659 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6660 if (sp->rxd_mode == RXD_MODE_1)
6661 size += NET_IP_ALIGN;
6662 else if (sp->rxd_mode == RXD_MODE_3B)
6663 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6664
6665 for (i = 0; i < config->rx_ring_num; i++) {
6666 blk_cnt = config->rx_cfg[i].num_rxd /
6667 (rxd_count[sp->rxd_mode] +1);
6668
6669 for (j = 0; j < blk_cnt; j++) {
6670 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6671 rxdp = mac_control->rings[i].
6672 rx_blocks[j].rxds[k].virt_addr;
6673 if(sp->rxd_mode == RXD_MODE_3B)
6674 ba = &mac_control->rings[i].ba[j][k];
6675 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6676 &skb,(u64 *)&temp0_64,
6677 (u64 *)&temp1_64,
6678 (u64 *)&temp2_64,
6679					size) == -ENOMEM) {
6680 return 0;
6681 }
6682
6683 set_rxd_buffer_size(sp, rxdp, size);
6684 wmb();
6685 /* flip the Ownership bit to Hardware */
6686 rxdp->Control_1 |= RXD_OWN_XENA;
6687 }
6688 }
6689 }
6690 return 0;
6691
6692}
6693
6694static int s2io_add_isr(struct s2io_nic * sp)
6695{
6696 int ret = 0;
6697 struct net_device *dev = sp->dev;
6698 int err = 0;
6699
6700 if (sp->intr_type == MSI_X)
6701 ret = s2io_enable_msi_x(sp);
6702 if (ret) {
6703 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6704 sp->intr_type = INTA;
6705 }
6706
6707 /* Store the values of the MSIX table in the struct s2io_nic structure */
6708 store_xmsi_data(sp);
6709
6710 /* After proper initialization of H/W, register ISR */
6711 if (sp->intr_type == MSI_X) {
6712 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6713
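		/*
		 * Walk the MSI-X entry table: FIFO-type entries get the
		 * Tx handler, everything else gets the Rx ring handler.
		 * Each vector's address/data pair is sanity checked and
		 * the entry is marked registered on success.
		 */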
6714 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6715 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6716 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6717 dev->name, i);
6718 err = request_irq(sp->entries[i].vector,
6719 s2io_msix_fifo_handle, 0, sp->desc[i],
6720 sp->s2io_entries[i].arg);
6721 /* If either data or addr is zero print it */
6722 if(!(sp->msix_info[i].addr &&
6723 sp->msix_info[i].data)) {
6724 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6725 "Data:0x%lx\n",sp->desc[i],
6726 (unsigned long long)
6727 sp->msix_info[i].addr,
6728 (unsigned long)
6729 ntohl(sp->msix_info[i].data));
6730 } else {
6731 msix_tx_cnt++;
6732 }
6733 } else {
6734 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6735 dev->name, i);
6736 err = request_irq(sp->entries[i].vector,
6737 s2io_msix_ring_handle, 0, sp->desc[i],
6738 sp->s2io_entries[i].arg);
6739 /* If either data or addr is zero print it */
6740 if(!(sp->msix_info[i].addr &&
6741 sp->msix_info[i].data)) {
6742 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6743 "Data:0x%lx\n",sp->desc[i],
6744 (unsigned long long)
6745 sp->msix_info[i].addr,
6746 (unsigned long)
6747 ntohl(sp->msix_info[i].data));
6748 } else {
6749 msix_rx_cnt++;
6750 }
6751 }
6752 if (err) {
6753 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6754 "failed\n", dev->name, i);
6755 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6756 return -1;
6757 }
6758 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6759 }
6760		printk(KERN_INFO "MSI-X-TX %d entries enabled\n", msix_tx_cnt);
6761		printk(KERN_INFO "MSI-X-RX %d entries enabled\n", msix_rx_cnt);
6762 }
6763 if (sp->intr_type == INTA) {
6764 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6765 sp->name, dev);
6766 if (err) {
6767 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6768 dev->name);
6769 return -1;
6770 }
6771 }
6772 return 0;
6773}
6774static void s2io_rem_isr(struct s2io_nic * sp)
6775{
6776 int cnt = 0;
6777 struct net_device *dev = sp->dev;
6778 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6779
6780 if (sp->intr_type == MSI_X) {
6781 int i;
6782 u16 msi_control;
6783
6784 for (i=1; (sp->s2io_entries[i].in_use ==
6785 MSIX_REGISTERED_SUCCESS); i++) {
6786 int vector = sp->entries[i].vector;
6787 void *arg = sp->s2io_entries[i].arg;
6788
6789 free_irq(vector, arg);
6790 }
6791
6792 kfree(sp->entries);
6793 stats->mem_freed +=
6794 (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
6795 kfree(sp->s2io_entries);
6796 stats->mem_freed +=
6797 (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
6798 sp->entries = NULL;
6799 sp->s2io_entries = NULL;
6800
6801 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6802 msi_control &= 0xFFFE; /* Disable MSI */
6803 pci_write_config_word(sp->pdev, 0x42, msi_control);
6804
6805 pci_disable_msix(sp->pdev);
6806 } else {
6807 free_irq(sp->pdev->irq, dev);
6808 }
6809	/* Wait till all interrupt handlers have completed */
6810 cnt = 0;
6811 do {
6812 msleep(10);
6813 if (!atomic_read(&sp->isr_cnt))
6814 break;
6815 cnt++;
6816 } while(cnt < 5);
6817}
6818
6819static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6820{
6821 int cnt = 0;
6822 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6823 unsigned long flags;
6824 register u64 val64 = 0;
6825
6826 del_timer_sync(&sp->alarm_timer);
6827 /* If s2io_set_link task is executing, wait till it completes. */
6828 while (test_and_set_bit(0, &(sp->link_state))) {
6829 msleep(50);
6830 }
6831 atomic_set(&sp->card_state, CARD_DOWN);
6832
6833 /* disable Tx and Rx traffic on the NIC */
6834 if (do_io)
6835 stop_nic(sp);
6836
6837 s2io_rem_isr(sp);
6838
6839 /* Kill tasklet. */
6840 tasklet_kill(&sp->task);
6841
6842 /* Check if the device is Quiescent and then Reset the NIC */
6843 while(do_io) {
6844		/* As per the HW requirement we need to replenish the
6845		 * receive buffers to avoid the ring bump. Since there is
6846		 * no intention of processing the Rx frames at this point,
6847		 * we just set the ownership bit of the rxds in each Rx
6848		 * ring to HW and set the appropriate buffer size
6849		 * based on the ring mode.
6850		 */
6851 rxd_owner_bit_reset(sp);
6852
6853 val64 = readq(&bar0->adapter_status);
6854 if (verify_xena_quiescence(sp)) {
6855 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6856 break;
6857 }
6858
6859 msleep(50);
6860 cnt++;
6861 if (cnt == 10) {
6862 DBG_PRINT(ERR_DBG,
6863 "s2io_close:Device not Quiescent ");
6864			DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
6865 (unsigned long long) val64);
6866 break;
6867 }
6868 }
6869 if (do_io)
6870 s2io_reset(sp);
6871
6872 spin_lock_irqsave(&sp->tx_lock, flags);
6873 /* Free all Tx buffers */
6874 free_tx_buffers(sp);
6875 spin_unlock_irqrestore(&sp->tx_lock, flags);
6876
6877 /* Free all Rx buffers */
6878 spin_lock_irqsave(&sp->rx_lock, flags);
6879 free_rx_buffers(sp);
6880 spin_unlock_irqrestore(&sp->rx_lock, flags);
6881
6882 clear_bit(0, &(sp->link_state));
6883}
6884
6885static void s2io_card_down(struct s2io_nic * sp)
6886{
6887 do_s2io_card_down(sp, 1);
6888}
6889
6890static int s2io_card_up(struct s2io_nic * sp)
6891{
6892 int i, ret = 0;
6893 struct mac_info *mac_control;
6894 struct config_param *config;
6895 struct net_device *dev = (struct net_device *) sp->dev;
6896 u16 interruptible;
6897
6898 /* Initialize the H/W I/O registers */
6899 if (init_nic(sp) != 0) {
6900 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6901 dev->name);
6902 s2io_reset(sp);
6903 return -ENODEV;
6904 }
6905
6906	/*
6907	 * Initialize the Rx buffers in each of the configured Rx rings.
6908	 */
6910 mac_control = &sp->mac_control;
6911 config = &sp->config;
6912
6913 for (i = 0; i < config->rx_ring_num; i++) {
6914 if ((ret = fill_rx_buffers(sp, i))) {
6915 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6916 dev->name);
6917 s2io_reset(sp);
6918 free_rx_buffers(sp);
6919 return -ENOMEM;
6920 }
6921 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6922 atomic_read(&sp->rx_bufs_left[i]));
6923 }
6924 /* Maintain the state prior to the open */
6925 if (sp->promisc_flg)
6926 sp->promisc_flg = 0;
6927 if (sp->m_cast_flg) {
6928 sp->m_cast_flg = 0;
6929 sp->all_multi_pos= 0;
6930 }
6931
6932 /* Setting its receive mode */
6933 s2io_set_multicast(dev);
6934
6935 if (sp->lro) {
6936 /* Initialize max aggregatable pkts per session based on MTU */
6937 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6938		/* Check if we can use the user-provided value, if specified */
6939 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6940 sp->lro_max_aggr_per_sess = lro_max_pkts;
6941 }
6942
6943 /* Enable Rx Traffic and interrupts on the NIC */
6944 if (start_nic(sp)) {
6945 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6946 s2io_reset(sp);
6947 free_rx_buffers(sp);
6948 return -ENODEV;
6949 }
6950
6951 /* Add interrupt service routine */
6952 if (s2io_add_isr(sp) != 0) {
6953 if (sp->intr_type == MSI_X)
6954 s2io_rem_isr(sp);
6955 s2io_reset(sp);
6956 free_rx_buffers(sp);
6957 return -ENODEV;
6958 }
6959
6960 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6961
6962 /* Enable tasklet for the device */
6963 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6964
6965 /* Enable select interrupts */
6966 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
6967 if (sp->intr_type != INTA)
6968 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6969 else {
6970 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6971 interruptible |= TX_PIC_INTR;
6972 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6973 }
6974
6975
6976 atomic_set(&sp->card_state, CARD_UP);
6977 return 0;
6978}
6979
6980/**
6981 * s2io_restart_nic - Resets the NIC.
6982 * @work : pointer to the work_struct embedded in the s2io_nic structure
6983 * Description:
6984 * This function is scheduled to be run by the s2io_tx_watchdog
6985 * function after 0.5 secs to reset the NIC. The idea is to reduce
6986 * the run time of the watch dog routine which is run holding a
6987 * spin lock.
6988 */
6989
6990static void s2io_restart_nic(struct work_struct *work)
6991{
6992 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6993 struct net_device *dev = sp->dev;
6994
6995 rtnl_lock();
6996
6997 if (!netif_running(dev))
6998 goto out_unlock;
6999
7000 s2io_card_down(sp);
7001 if (s2io_card_up(sp)) {
7002 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7003 dev->name);
7004 }
7005 netif_wake_queue(dev);
7006 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7007 dev->name);
7008out_unlock:
7009 rtnl_unlock();
7010}
7011
7012/**
7013 * s2io_tx_watchdog - Watchdog for transmit side.
7014 * @dev : Pointer to net device structure
7015 * Description:
7016 * This function is triggered if the Tx Queue is stopped
7017 * for a pre-defined amount of time when the Interface is still up.
7018 * If the Interface is jammed in such a situation, the hardware is
7019 * reset (by s2io_close) and restarted again (by s2io_open) to
7020 * overcome any problem that might have been caused in the hardware.
7021 * Return value:
7022 * void
7023 */
7024
7025static void s2io_tx_watchdog(struct net_device *dev)
7026{
7027 struct s2io_nic *sp = dev->priv;
7028
7029 if (netif_carrier_ok(dev)) {
7030 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7031 schedule_work(&sp->rst_timer_task);
7032 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7033 }
7034}
7035
7036/**
7037 * rx_osm_handler - To perform some OS related operations on SKB.
7038 * @sp: private member of the device structure,pointer to s2io_nic structure.
7039 * @skb : the socket buffer pointer.
7040 * @len : length of the packet
7041 * @cksum : FCS checksum of the frame.
7042 * @ring_no : the ring from which this RxD was extracted.
7043 * Description:
7044 * This function is called by the Rx interrupt service routine to perform
7045 * some OS related operations on the SKB before passing it to the upper
7046 * layers. It mainly checks if the checksum is OK, if so adds it to the
7047 * SKB's cksum variable, increments the Rx packet count and passes the SKB
7048 * to the upper layer. If the checksum is wrong, it increments the Rx
7049 * packet error count, frees the SKB and returns error.
7050 * Return value:
7051 * SUCCESS on success and -1 on failure.
7052 */
7053static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7054{
7055 struct s2io_nic *sp = ring_data->nic;
7056 struct net_device *dev = (struct net_device *) sp->dev;
7057 struct sk_buff *skb = (struct sk_buff *)
7058 ((unsigned long) rxdp->Host_Control);
7059 int ring_no = ring_data->ring_no;
7060 u16 l3_csum, l4_csum;
7061 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7062 struct lro *lro;
7063 u8 err_mask;
7064
7065 skb->dev = dev;
7066
7067 if (err) {
7068 /* Check for parity error */
7069 if (err & 0x1) {
7070 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7071 }
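		/* The 4-bit transfer code occupies bits 48 and up of Control_1 */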
7072 err_mask = err >> 48;
7073 switch(err_mask) {
7074 case 1:
7075 sp->mac_control.stats_info->sw_stat.
7076 rx_parity_err_cnt++;
7077 break;
7078
7079 case 2:
7080 sp->mac_control.stats_info->sw_stat.
7081 rx_abort_cnt++;
7082 break;
7083
7084 case 3:
7085 sp->mac_control.stats_info->sw_stat.
7086 rx_parity_abort_cnt++;
7087 break;
7088
7089 case 4:
7090 sp->mac_control.stats_info->sw_stat.
7091 rx_rda_fail_cnt++;
7092 break;
7093
7094 case 5:
7095 sp->mac_control.stats_info->sw_stat.
7096 rx_unkn_prot_cnt++;
7097 break;
7098
7099 case 6:
7100 sp->mac_control.stats_info->sw_stat.
7101 rx_fcs_err_cnt++;
7102 break;
7103
7104 case 7:
7105 sp->mac_control.stats_info->sw_stat.
7106 rx_buf_size_err_cnt++;
7107 break;
7108
7109 case 8:
7110 sp->mac_control.stats_info->sw_stat.
7111 rx_rxd_corrupt_cnt++;
7112 break;
7113
7114 case 15:
7115 sp->mac_control.stats_info->sw_stat.
7116 rx_unkn_err_cnt++;
7117 break;
7118 }
		/*
		 * Drop the packet if the transfer code is bad. The exception
		 * is 0x5, which could be due to an unsupported IPv6 extension
		 * header. In that case we let the stack handle the packet;
		 * since the checksum will then be incorrect, the stack will
		 * validate it itself.
		 */
7126 if (err_mask != 0x5) {
7127 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7128 dev->name, err_mask);
7129 sp->stats.rx_crc_errors++;
7130 sp->mac_control.stats_info->sw_stat.mem_freed
7131 += skb->truesize;
7132 dev_kfree_skb(skb);
7133 atomic_dec(&sp->rx_bufs_left[ring_no]);
7134 rxdp->Host_Control = 0;
7135 return 0;
7136 }
7137 }
7138
7139 /* Updating statistics */
7140 sp->stats.rx_packets++;
7141 rxdp->Host_Control = 0;
7142 if (sp->rxd_mode == RXD_MODE_1) {
7143 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7144
7145 sp->stats.rx_bytes += len;
7146 skb_put(skb, len);
7147
7148 } else if (sp->rxd_mode == RXD_MODE_3B) {
7149 int get_block = ring_data->rx_curr_get_info.block_index;
7150 int get_off = ring_data->rx_curr_get_info.offset;
7151 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7152 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7153 unsigned char *buff = skb_push(skb, buf0_len);
7154
7155 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7156 sp->stats.rx_bytes += buf0_len + buf2_len;
7157 memcpy(buff, ba->ba_0, buf0_len);
7158 skb_put(skb, buf2_len);
7159 }
7160
7161 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7162 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7163 (sp->rx_csum)) {
7164 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7165 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7166 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7167 /*
7168 * NIC verifies if the Checksum of the received
7169 * frame is Ok or not and accordingly returns
7170 * a flag in the RxD.
7171 */
7172 skb->ip_summed = CHECKSUM_UNNECESSARY;
7173 if (sp->lro) {
7174 u32 tcp_len;
7175 u8 *tcp;
7176 int ret = 0;
7177
7178 ret = s2io_club_tcp_session(skb->data, &tcp,
7179 &tcp_len, &lro, rxdp, sp);
7180 switch (ret) {
7181 case 3: /* Begin anew */
7182 lro->parent = skb;
7183 goto aggregate;
7184 case 1: /* Aggregate */
7185 {
7186 lro_append_pkt(sp, lro,
7187 skb, tcp_len);
7188 goto aggregate;
7189 }
7190 case 4: /* Flush session */
7191 {
7192 lro_append_pkt(sp, lro,
7193 skb, tcp_len);
7194 queue_rx_frame(lro->parent);
7195 clear_lro_session(lro);
7196 sp->mac_control.stats_info->
7197 sw_stat.flush_max_pkts++;
7198 goto aggregate;
7199 }
7200 case 2: /* Flush both */
7201 lro->parent->data_len =
7202 lro->frags_len;
7203 sp->mac_control.stats_info->
7204 sw_stat.sending_both++;
7205 queue_rx_frame(lro->parent);
7206 clear_lro_session(lro);
7207 goto send_up;
7208 case 0: /* sessions exceeded */
7209 case -1: /* non-TCP or not
7210 * L2 aggregatable
7211 */
7212 case 5: /*
7213 * First pkt in session not
7214 * L3/L4 aggregatable
7215 */
7216 break;
7217 default:
7218 DBG_PRINT(ERR_DBG,
7219 "%s: Samadhana!!\n",
7220 __FUNCTION__);
7221 BUG();
7222 }
7223 }
7224 } else {
7225 /*
7226 * Packet with erroneous checksum, let the
7227 * upper layers deal with it.
7228 */
7229 skb->ip_summed = CHECKSUM_NONE;
7230 }
7231 } else {
7232 skb->ip_summed = CHECKSUM_NONE;
7233 }
7234 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7235 if (!sp->lro) {
7236 skb->protocol = eth_type_trans(skb, dev);
7237 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7238 vlan_strip_flag)) {
7239 /* Queueing the vlan frame to the upper layer */
7240 if (napi)
7241 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7242 RXD_GET_VLAN_TAG(rxdp->Control_2));
7243 else
7244 vlan_hwaccel_rx(skb, sp->vlgrp,
7245 RXD_GET_VLAN_TAG(rxdp->Control_2));
7246 } else {
7247 if (napi)
7248 netif_receive_skb(skb);
7249 else
7250 netif_rx(skb);
7251 }
7252 } else {
7253send_up:
7254 queue_rx_frame(skb);
7255 }
7256 dev->last_rx = jiffies;
7257aggregate:
7258 atomic_dec(&sp->rx_bufs_left[ring_no]);
7259 return SUCCESS;
7260}
7261
7262/**
7263 * s2io_link - stops/starts the Tx queue.
7264 * @sp : private member of the device structure, which is a pointer to the
7265 * s2io_nic structure.
 * @link : indicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
7271 * Return value:
7272 * void.
7273 */
7274
7275static void s2io_link(struct s2io_nic * sp, int link)
7276{
7277 struct net_device *dev = (struct net_device *) sp->dev;
7278
7279 if (link != sp->last_link_state) {
7280 if (link == LINK_DOWN) {
7281 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7282 netif_carrier_off(dev);
			if (sp->mac_control.stats_info->sw_stat.link_up_cnt)
7284 sp->mac_control.stats_info->sw_stat.link_up_time =
7285 jiffies - sp->start_time;
7286 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7287 } else {
7288 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7289 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7290 sp->mac_control.stats_info->sw_stat.link_down_time =
7291 jiffies - sp->start_time;
7292 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7293 netif_carrier_on(dev);
7294 }
7295 }
7296 sp->last_link_state = link;
7297 sp->start_time = jiffies;
7298}
7299
7300/**
 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7302 * @sp : private member of the device structure, which is a pointer to the
7303 * s2io_nic structure.
7304 * Description:
7305 * This function initializes a few of the PCI and PCI-X configuration registers
7306 * with recommended values.
7307 * Return value:
7308 * void
7309 */
7310
7311static void s2io_init_pci(struct s2io_nic * sp)
7312{
7313 u16 pci_cmd = 0, pcix_cmd = 0;
7314
7315 /* Enable Data Parity Error Recovery in PCI-X command register. */
7316 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7317 &(pcix_cmd));
7318 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7319 (pcix_cmd | 1));
7320 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7321 &(pcix_cmd));
7322
7323 /* Set the PErr Response bit in PCI command register. */
7324 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7325 pci_write_config_word(sp->pdev, PCI_COMMAND,
7326 (pci_cmd | PCI_COMMAND_PARITY));
7327 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7328}
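
/*
 * Note: the trailing config-space reads above appear to be read-backs;
 * the values are discarded, and the reads only ensure the preceding
 * writes have taken effect before the driver proceeds.
 */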
7329
7330static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7331{
	if (tx_fifo_num > 8) {
7333 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
7334 "supported\n");
7335 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
7336 tx_fifo_num = 8;
7337 }
	if (rx_ring_num > 8) {
7339 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7340 "supported\n");
7341 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7342 rx_ring_num = 8;
7343 }
7344 if (*dev_intr_type != INTA)
7345 napi = 0;
7346
7347#ifndef CONFIG_PCI_MSI
7348 if (*dev_intr_type != INTA) {
		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
			  "MSI/MSI-X. Defaulting to INTA\n");
7351 *dev_intr_type = INTA;
7352 }
7353#else
7354 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7355 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7356 "Defaulting to INTA\n");
7357 *dev_intr_type = INTA;
7358 }
7359#endif
7360 if ((*dev_intr_type == MSI_X) &&
7361 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7362 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7363 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7364 "Defaulting to INTA\n");
7365 *dev_intr_type = INTA;
7366 }
7367
7368 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7369 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7370 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7371 rx_ring_mode = 1;
7372 }
7373 return SUCCESS;
7374}
7375
/**
 * rts_ds_steer - Receive traffic steering based on the IPv4 TOS or the
 * IPv6 Traffic class, respectively.
 * @nic: device private variable
 * @ds_codepoint: the DS codepoint (0-63) whose traffic is to be steered
 * @ring: the receive ring the codepoint's traffic is steered to
 * Description: The function configures the receive steering to the
 * desired receive ring.
 * Return Value: SUCCESS on success and
 * '-1' on failure.
 */
7385static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7386{
7387 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7388 register u64 val64 = 0;
7389
7390 if (ds_codepoint > 63)
7391 return FAILURE;
7392
7393 val64 = RTS_DS_MEM_DATA(ring);
7394 writeq(val64, &bar0->rts_ds_mem_data);
7395
7396 val64 = RTS_DS_MEM_CTRL_WE |
7397 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7398 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7399
7400 writeq(val64, &bar0->rts_ds_mem_ctrl);
7401
7402 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7403 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7404 S2IO_BIT_RESET);
7405}
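
/*
 * A hypothetical usage sketch (not present in this driver): steer all
 * traffic marked with DS codepoint 46 (expedited forwarding) to ring 1.
 */
#if 0
	if (rts_ds_steer(nic, 46, 1) != SUCCESS)
		DBG_PRINT(ERR_DBG, "s2io: DS steering configuration failed\n");
#endif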
7406
7407/**
 * s2io_init_nic - Initialization of the adapter.
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization, including memory and device structure
 * setup and initialization of the device private variable, is done. The
 * swapper control register is also initialized to enable read and write
 * into the I/O registers of the device.
7417 * Return value:
7418 * returns 0 on success and negative on failure.
7419 */
7420
7421static int __devinit
7422s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7423{
7424 struct s2io_nic *sp;
7425 struct net_device *dev;
7426 int i, j, ret;
7427 int dma_flag = FALSE;
7428 u32 mac_up, mac_down;
7429 u64 val64 = 0, tmp64 = 0;
7430 struct XENA_dev_config __iomem *bar0 = NULL;
7431 u16 subid;
7432 struct mac_info *mac_control;
7433 struct config_param *config;
7434 int mode;
7435 u8 dev_intr_type = intr_type;
7436
7437 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
7438 return ret;
7439
7440 if ((ret = pci_enable_device(pdev))) {
7441 DBG_PRINT(ERR_DBG,
7442 "s2io_init_nic: pci_enable_device failed\n");
7443 return ret;
7444 }
7445
7446 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7447 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7448 dma_flag = TRUE;
7449 if (pci_set_consistent_dma_mask
7450 (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for "
				  "consistent allocations\n");
7454 pci_disable_device(pdev);
7455 return -ENOMEM;
7456 }
7457 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7458 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7459 } else {
7460 pci_disable_device(pdev);
7461 return -ENOMEM;
7462 }
7463 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
			  __FUNCTION__, ret);
7465 pci_disable_device(pdev);
7466 return -ENODEV;
7467 }
7468
7469 dev = alloc_etherdev(sizeof(struct s2io_nic));
7470 if (dev == NULL) {
7471 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7472 pci_disable_device(pdev);
7473 pci_release_regions(pdev);
7474 return -ENODEV;
7475 }
7476
7477 pci_set_master(pdev);
7478 pci_set_drvdata(pdev, dev);
7479 SET_MODULE_OWNER(dev);
7480 SET_NETDEV_DEV(dev, &pdev->dev);
7481
7482 /* Private member variable initialized to s2io NIC structure */
7483 sp = dev->priv;
7484 memset(sp, 0, sizeof(struct s2io_nic));
7485 sp->dev = dev;
7486 sp->pdev = pdev;
7487 sp->high_dma_flag = dma_flag;
7488 sp->device_enabled_once = FALSE;
7489 if (rx_ring_mode == 1)
7490 sp->rxd_mode = RXD_MODE_1;
7491 if (rx_ring_mode == 2)
7492 sp->rxd_mode = RXD_MODE_3B;
7493
7494 sp->intr_type = dev_intr_type;
7495
7496 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7497 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7498 sp->device_type = XFRAME_II_DEVICE;
7499 else
7500 sp->device_type = XFRAME_I_DEVICE;
7501
7502 sp->lro = lro;
7503
7504 /* Initialize some PCI/PCI-X fields of the NIC. */
7505 s2io_init_pci(sp);
7506
7507 /*
7508 * Setting the device configuration parameters.
7509 * Most of these parameters can be specified by the user during
7510 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
7512 * are initialized with default values.
7513 */
7514 mac_control = &sp->mac_control;
7515 config = &sp->config;
7516
7517 /* Tx side parameters. */
7518 config->tx_fifo_num = tx_fifo_num;
7519 for (i = 0; i < MAX_TX_FIFOS; i++) {
7520 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7521 config->tx_cfg[i].fifo_priority = i;
7522 }
7523
7524 /* mapping the QoS priority to the configured fifos */
7525 for (i = 0; i < MAX_TX_FIFOS; i++)
7526 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7527
7528 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7529 for (i = 0; i < config->tx_fifo_num; i++) {
7530 config->tx_cfg[i].f_no_snoop =
7531 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7532 if (config->tx_cfg[i].fifo_len < 65) {
7533 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7534 break;
7535 }
7536 }
7537 /* + 2 because one Txd for skb->data and one Txd for UFO */
7538 config->max_txds = MAX_SKB_FRAGS + 2;
7539
7540 /* Rx side parameters. */
7541 config->rx_ring_num = rx_ring_num;
7542 for (i = 0; i < MAX_RX_RINGS; i++) {
7543 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7544 (rxd_count[sp->rxd_mode] + 1);
7545 config->rx_cfg[i].ring_priority = i;
7546 }
7547
7548 for (i = 0; i < rx_ring_num; i++) {
7549 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7550 config->rx_cfg[i].f_no_snoop =
7551 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7552 }
7553
7554 /* Setting Mac Control parameters */
7555 mac_control->rmac_pause_time = rmac_pause_time;
7556 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7557 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7558
7560 /* Initialize Ring buffer parameters. */
7561 for (i = 0; i < config->rx_ring_num; i++)
7562 atomic_set(&sp->rx_bufs_left[i], 0);
7563
7564 /* Initialize the number of ISRs currently running */
7565 atomic_set(&sp->isr_cnt, 0);
7566
7567 /* initialize the shared memory used by the NIC and the host */
7568 if (init_shared_mem(sp)) {
7569 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7570 dev->name);
7571 ret = -ENOMEM;
7572 goto mem_alloc_failed;
7573 }
7574
7575 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7576 pci_resource_len(pdev, 0));
7577 if (!sp->bar0) {
7578 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7579 dev->name);
7580 ret = -ENOMEM;
7581 goto bar0_remap_failed;
7582 }
7583
7584 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7585 pci_resource_len(pdev, 2));
7586 if (!sp->bar1) {
7587 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7588 dev->name);
7589 ret = -ENOMEM;
7590 goto bar1_remap_failed;
7591 }
7592
7593 dev->irq = pdev->irq;
7594 dev->base_addr = (unsigned long) sp->bar0;
7595
7596 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7597 for (j = 0; j < MAX_TX_FIFOS; j++) {
7598 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7599 (sp->bar1 + (j * 0x00020000));
7600 }
7601
7602 /* Driver entry points */
7603 dev->open = &s2io_open;
7604 dev->stop = &s2io_close;
7605 dev->hard_start_xmit = &s2io_xmit;
7606 dev->get_stats = &s2io_get_stats;
7607 dev->set_multicast_list = &s2io_set_multicast;
7608 dev->do_ioctl = &s2io_ioctl;
7609 dev->change_mtu = &s2io_change_mtu;
7610 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7611 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7612 dev->vlan_rx_register = s2io_vlan_rx_register;
7613
7614 /*
7615 * will use eth_mac_addr() for dev->set_mac_address
7616 * mac address will be set every time dev->open() is called
7617 */
7618 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7619
7620#ifdef CONFIG_NET_POLL_CONTROLLER
7621 dev->poll_controller = s2io_netpoll;
7622#endif
7623
7624 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7625 if (sp->high_dma_flag == TRUE)
7626 dev->features |= NETIF_F_HIGHDMA;
7627 dev->features |= NETIF_F_TSO;
7628 dev->features |= NETIF_F_TSO6;
7629 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7630 dev->features |= NETIF_F_UFO;
7631 dev->features |= NETIF_F_HW_CSUM;
7632 }
7633
7634 dev->tx_timeout = &s2io_tx_watchdog;
7635 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7636 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7637 INIT_WORK(&sp->set_link_task, s2io_set_link);
7638
7639 pci_save_state(sp->pdev);
7640
7641 /* Setting swapper control on the NIC, for proper reset operation */
7642 if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7644 dev->name);
7645 ret = -EAGAIN;
7646 goto set_swap_failed;
7647 }
7648
7649 /* Verify if the Herc works on the slot its placed into */
7650 if (sp->device_type & XFRAME_II_DEVICE) {
7651 mode = s2io_verify_pci_mode(sp);
7652 if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
				  __FUNCTION__);
7655 ret = -EBADSLT;
7656 goto set_swap_failed;
7657 }
7658 }
7659
7660 /* Not needed for Herc */
7661 if (sp->device_type & XFRAME_I_DEVICE) {
7662 /*
7663 * Fix for all "FFs" MAC address problems observed on
7664 * Alpha platforms
7665 */
7666 fix_mac_address(sp);
7667 s2io_reset(sp);
7668 }
7669
7670 /*
7671 * MAC address initialization.
7672 * For now only one mac address will be read and used.
7673 */
7674 bar0 = sp->bar0;
7675 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7676 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7677 writeq(val64, &bar0->rmac_addr_cmd_mem);
7678 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7679 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7680 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7681 mac_down = (u32) tmp64;
7682 mac_up = (u32) (tmp64 >> 32);
7683
7684 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7685 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7686 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7687 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7688 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7689 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7690
7691 /* Set the factory defined MAC address initially */
7692 dev->addr_len = ETH_ALEN;
7693 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7694
7695 /* Store the values of the MSIX table in the s2io_nic structure */
7696 store_xmsi_data(sp);
7697 /* reset Nic and bring it to known state */
7698 s2io_reset(sp);
7699
7700 /*
7701 * Initialize the tasklet status and link state flags
7702 * and the card state parameter
7703 */
7704 atomic_set(&(sp->card_state), 0);
7705 sp->tasklet_status = 0;
7706 sp->link_state = 0;
7707
7708 /* Initialize spinlocks */
7709 spin_lock_init(&sp->tx_lock);
7710
7711 if (!napi)
7712 spin_lock_init(&sp->put_lock);
7713 spin_lock_init(&sp->rx_lock);
7714
7715 /*
7716 * SXE-002: Configure link and activity LED to init state
7717 * on driver load.
7718 */
7719 subid = sp->pdev->subsystem_device;
7720 if ((subid & 0xFF) >= 0x07) {
7721 val64 = readq(&bar0->gpio_control);
7722 val64 |= 0x0000800000000000ULL;
7723 writeq(val64, &bar0->gpio_control);
7724 val64 = 0x0411040400000000ULL;
7725 writeq(val64, (void __iomem *) bar0 + 0x2700);
7726 val64 = readq(&bar0->gpio_control);
7727 }
7728
7729 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7730
7731 if (register_netdev(dev)) {
7732 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7733 ret = -ENODEV;
7734 goto register_failed;
7735 }
7736 s2io_vpd_read(sp);
7737 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7738 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7739 sp->product_name, pdev->revision);
7740 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7741 s2io_driver_version);
7742 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
		  "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
7744 sp->def_mac_addr[0].mac_addr[0],
7745 sp->def_mac_addr[0].mac_addr[1],
7746 sp->def_mac_addr[0].mac_addr[2],
7747 sp->def_mac_addr[0].mac_addr[3],
7748 sp->def_mac_addr[0].mac_addr[4],
7749 sp->def_mac_addr[0].mac_addr[5]);
7750 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7751 if (sp->device_type & XFRAME_II_DEVICE) {
7752 mode = s2io_print_pci_mode(sp);
7753 if (mode < 0) {
			DBG_PRINT(ERR_DBG, "Unsupported PCI bus mode\n");
7755 ret = -EBADSLT;
7756 unregister_netdev(dev);
7757 goto set_swap_failed;
7758 }
7759 }
	switch (sp->rxd_mode) {
7761 case RXD_MODE_1:
7762 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7763 dev->name);
7764 break;
7765 case RXD_MODE_3B:
7766 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7767 dev->name);
7768 break;
7769 }
7770
7771 if (napi)
7772 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
	switch (sp->intr_type) {
7774 case INTA:
7775 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7776 break;
7777 case MSI_X:
7778 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7779 break;
7780 }
7781 if (sp->lro)
7782 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7783 dev->name);
7784 if (ufo)
7785 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7786 " enabled\n", dev->name);
7787 /* Initialize device name */
7788 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7789
7790 /* Initialize bimodal Interrupts */
7791 sp->config.bimodal = bimodal;
7792 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7793 sp->config.bimodal = 0;
		DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
7795 dev->name);
7796 }
7797
7798 /*
7799 * Make Link state as off at this point, when the Link change
7800 * interrupt comes the state will be automatically changed to
7801 * the right state.
7802 */
7803 netif_carrier_off(dev);
7804
7805 return 0;
7806
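/*
 * Error unwind: the labels below fall through, releasing resources in
 * roughly the reverse order of their acquisition above.
 */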
7807 register_failed:
7808 set_swap_failed:
7809 iounmap(sp->bar1);
7810 bar1_remap_failed:
7811 iounmap(sp->bar0);
7812 bar0_remap_failed:
7813 mem_alloc_failed:
7814 free_shared_mem(sp);
7815 pci_disable_device(pdev);
7816 pci_release_regions(pdev);
7817 pci_set_drvdata(pdev, NULL);
7818 free_netdev(dev);
7819
7820 return ret;
7821}
7822
7823/**
7824 * s2io_rem_nic - Free the PCI device
7825 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
7830 */
7831
7832static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7833{
7834 struct net_device *dev =
7835 (struct net_device *) pci_get_drvdata(pdev);
7836 struct s2io_nic *sp;
7837
7838 if (dev == NULL) {
7839 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7840 return;
7841 }
7842
7843 flush_scheduled_work();
7844
7845 sp = dev->priv;
7846 unregister_netdev(dev);
7847
7848 free_shared_mem(sp);
7849 iounmap(sp->bar0);
7850 iounmap(sp->bar1);
7851 pci_release_regions(pdev);
7852 pci_set_drvdata(pdev, NULL);
7853 free_netdev(dev);
7854 pci_disable_device(pdev);
7855}
7856
7857/**
7858 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It
 * registers the driver with the PCI subsystem; the module loadable
 * parameters are verified later, in s2io_init_nic, when a device is probed.
7861 */
7862
7863int __init s2io_starter(void)
7864{
7865 return pci_register_driver(&s2io_driver);
7866}
7867
7868/**
7869 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
7871 */
7872
7873static __exit void s2io_closer(void)
7874{
7875 pci_unregister_driver(&s2io_driver);
7876 DBG_PRINT(INIT_DBG, "cleanup done\n");
7877}
7878
7879module_init(s2io_starter);
7880module_exit(s2io_closer);
7881
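/*
 * check_L2_lro_capable - check whether the L2 header allows aggregation
 * and locate the IP and TCP headers within the frame.
 * Returns 0 and fills in *ip and *tcp on success, or -1 if the frame is
 * not an LRO candidate (non-TCP, or a non-DIX L2 encapsulation).
 */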
7882static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7883 struct tcphdr **tcp, struct RxD_t *rxdp)
7884{
7885 int ip_off;
7886 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7887
7888 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7889 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7890 __FUNCTION__);
7891 return -1;
7892 }
7893
7894 /* TODO:
7895 * By default the VLAN field in the MAC is stripped by the card, if this
7896 * feature is turned off in rx_pa_cfg register, then the ip_off field
7897 * has to be shifted by a further 2 bytes
7898 */
7899 switch (l2_type) {
7900 case 0: /* DIX type */
7901 case 4: /* DIX type with VLAN */
7902 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7903 break;
7904 /* LLC, SNAP etc are considered non-mergeable */
7905 default:
7906 return -1;
7907 }
7908
7909 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7910 ip_len = (u8)((*ip)->ihl);
7911 ip_len <<= 2;
7912 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7913
7914 return 0;
7915}
7916
7917static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7918 struct tcphdr *tcp)
7919{
7920 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7921 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7922 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7923 return -1;
7924 return 0;
7925}
7926
7927static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7928{
	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
7930}
7931
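/*
 * initiate_new_session - seed an unused LRO object with the header
 * pointers and the sequence/ack/timestamp state of the first packet of
 * a new flow.
 */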
7932static void initiate_new_session(struct lro *lro, u8 *l2h,
7933 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7934{
7935 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7936 lro->l2h = l2h;
7937 lro->iph = ip;
7938 lro->tcph = tcp;
7939 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	/* kept in network byte order; update_L3L4_header() writes it back
	 * into the header as-is
	 */
	lro->tcp_ack = tcp->ack_seq;
7941 lro->sg_num = 1;
7942 lro->total_len = ntohs(ip->tot_len);
7943 lro->frags_len = 0;
7944 /*
7945 * check if we saw TCP timestamp. Other consistency checks have
7946 * already been done.
7947 */
	if (tcp->doff == 8) {
		u32 *ptr;
		ptr = (u32 *)(tcp+1);
		lro->saw_ts = 1;
		/* tsval is compared numerically, so store it in host byte
		 * order; tsecr is only echoed back, so keep it as-is
		 */
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr+2);
	}
7955 lro->in_use = 1;
7956}
7957
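/*
 * update_L3L4_header - before an aggregated frame is flushed up the
 * stack, rewrite the parent packet's IP total length (with a recomputed
 * IP checksum), TCP ack and window, and the echoed timestamp, so the
 * single large frame looks like one coherent TCP segment.
 */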
7958static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7959{
7960 struct iphdr *ip = lro->iph;
7961 struct tcphdr *tcp = lro->tcph;
7962 __sum16 nchk;
7963 struct stat_block *statinfo = sp->mac_control.stats_info;
7964 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7965
7966 /* Update L3 header */
7967 ip->tot_len = htons(lro->total_len);
7968 ip->check = 0;
7969 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7970 ip->check = nchk;
7971
7972 /* Update L4 header */
7973 tcp->ack_seq = lro->tcp_ack;
7974 tcp->window = lro->window;
7975
7976 /* Update tsecr field if this session has timestamps enabled */
7977 if (lro->saw_ts) {
7978 u32 *ptr = (u32 *)(tcp + 1);
7979 *(ptr+2) = lro->cur_tsecr;
7980 }
7981
7982 /* Update counters required for calculation of
7983 * average no. of packets aggregated.
7984 */
7985 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7986 statinfo->sw_stat.num_aggregations++;
7987}
7988
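/*
 * aggregate_new_rx - fold a new in-order segment into an existing LRO
 * session: update the aggregate lengths, the expected next sequence
 * number, and the latest ack/window/timestamp values.
 */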
7989static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7990 struct tcphdr *tcp, u32 l4_pyld)
7991{
7992 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7993 lro->total_len += l4_pyld;
7994 lro->frags_len += l4_pyld;
7995 lro->tcp_next_seq += l4_pyld;
7996 lro->sg_num++;
7997
	/* Update ack seq no. and advertised window (from this pkt) in LRO object */
7999 lro->tcp_ack = tcp->ack_seq;
8000 lro->window = tcp->window;
8001
	if (lro->saw_ts) {
		u32 *ptr;
		/* Update tsecr and tsval from this packet; same byte-order
		 * convention as in initiate_new_session()
		 */
		ptr = (u32 *) (tcp + 1);
		lro->cur_tsval = ntohl(*(ptr + 1));
		lro->cur_tsecr = *(ptr + 2);
	}
8009}
8010
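/*
 * verify_l3_l4_lro_capable - per-packet L3/L4 checks: only plain IPv4
 * (no IP options, no CE mark) segments that carry data, have only the
 * ACK flag set, and use at most a single TCP timestamp option may be
 * aggregated.
 */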
8011static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8012 struct tcphdr *tcp, u32 tcp_pyld_len)
8013{
8014 u8 *ptr;
8015
8016 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8017
8018 if (!tcp_pyld_len) {
8019 /* Runt frame or a pure ack */
8020 return -1;
8021 }
8022
8023 if (ip->ihl != 5) /* IP has options */
8024 return -1;
8025
8026 /* If we see CE codepoint in IP header, packet is not mergeable */
8027 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8028 return -1;
8029
8030 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8031 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8032 tcp->ece || tcp->cwr || !tcp->ack) {
8033 /*
8034 * Currently recognize only the ack control word and
8035 * any other control field being set would result in
8036 * flushing the LRO session
8037 */
8038 return -1;
8039 }
8040
8041 /*
8042 * Allow only one TCP timestamp option. Don't aggregate if
8043 * any other options are detected.
8044 */
8045 if (tcp->doff != 5 && tcp->doff != 8)
8046 return -1;
8047
8048 if (tcp->doff == 8) {
8049 ptr = (u8 *)(tcp + 1);
8050 while (*ptr == TCPOPT_NOP)
8051 ptr++;
8052 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8053 return -1;
8054
		/* Ensure the timestamp value increases monotonically;
		 * compare in host byte order, matching how cur_tsval is
		 * stored
		 */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((u32 *)(ptr+2))))
				return -1;
8059
8060 /* timestamp echo reply should be non-zero */
8061 if (*((u32 *)(ptr+6)) == 0)
8062 return -1;
8063 }
8064
8065 return 0;
8066}
8067
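/*
 * s2io_club_tcp_session - the LRO decision function. It classifies an
 * incoming TCP segment against the existing LRO sessions and returns a
 * code that rx_osm_handler() dispatches on:
 *	-1 - not aggregatable at L2 (non-TCP or unsupported encapsulation)
 *	 0 - all LRO sessions already in use; send the packet up as-is
 *	 1 - matched an existing session; aggregate the segment into it
 *	 2 - matched a session but cannot aggregate; flush both
 *	 3 - a new session was begun with this segment
 *	 4 - aggregation hit the per-session limit; flush the session
 *	 5 - first packet of a flow that is not L3/L4 aggregatable
 */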
8068static int
8069s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8070 struct RxD_t *rxdp, struct s2io_nic *sp)
8071{
8072 struct iphdr *ip;
8073 struct tcphdr *tcph;
8074 int ret = 0, i;
8075
8076 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8077 rxdp))) {
8078 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8079 ip->saddr, ip->daddr);
8080 } else {
8081 return ret;
8082 }
8083
8084 tcph = (struct tcphdr *)*tcp;
8085 *tcp_len = get_l4_pyld_length(ip, tcph);
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8087 struct lro *l_lro = &sp->lro0_n[i];
8088 if (l_lro->in_use) {
8089 if (check_for_socket_match(l_lro, ip, tcph))
8090 continue;
8091 /* Sock pair matched */
8092 *lro = l_lro;
8093
8094 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of order. Expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
8097 (*lro)->tcp_next_seq,
8098 ntohl(tcph->seq));
8099
8100 sp->mac_control.stats_info->
8101 sw_stat.outof_sequence_pkts++;
8102 ret = 2;
8103 break;
8104 }
8105
8106 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8107 ret = 1; /* Aggregate */
8108 else
8109 ret = 2; /* Flush both */
8110 break;
8111 }
8112 }
8113
8114 if (ret == 0) {
8115 /* Before searching for available LRO objects,
8116 * check if the pkt is L3/L4 aggregatable. If not
8117 * don't create new LRO session. Just send this
8118 * packet up.
8119 */
8120 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
8121 return 5;
8122 }
8123
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8125 struct lro *l_lro = &sp->lro0_n[i];
8126 if (!(l_lro->in_use)) {
8127 *lro = l_lro;
8128 ret = 3; /* Begin anew */
8129 break;
8130 }
8131 }
8132 }
8133
8134 if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8136 __FUNCTION__);
8137 *lro = NULL;
8138 return ret;
8139 }
8140
8141 switch (ret) {
8142 case 3:
8143 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
8144 break;
8145 case 2:
8146 update_L3L4_header(sp, *lro);
8147 break;
8148 case 1:
8149 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8150 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8151 update_L3L4_header(sp, *lro);
8152 ret = 4; /* Flush the LRO */
8153 }
8154 break;
8155 default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n",
			  __FUNCTION__);
8158 break;
8159 }
8160
8161 return ret;
8162}
8163
static void clear_lro_session(struct lro *lro)
{
	memset(lro, 0, sizeof(struct lro));
}
8170
8171static void queue_rx_frame(struct sk_buff *skb)
8172{
8173 struct net_device *dev = skb->dev;
8174
8175 skb->protocol = eth_type_trans(skb, dev);
8176 if (napi)
8177 netif_receive_skb(skb);
8178 else
8179 netif_rx(skb);
8180}
8181
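/*
 * lro_append_pkt - chain an aggregated segment onto the session's parent
 * skb via its frag_list, keeping the parent's len, data_len and truesize
 * accounting consistent.
 */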
8182static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8183 struct sk_buff *skb,
8184 u32 tcp_len)
8185{
8186 struct sk_buff *first = lro->parent;
8187
8188 first->len += tcp_len;
8189 first->data_len = lro->frags_len;
8190 skb_pull(skb, (skb->len - tcp_len));
8191 if (skb_shinfo(first)->frag_list)
8192 lro->last_frag->next = skb;
8193 else
8194 skb_shinfo(first)->frag_list = skb;
8195 first->truesize += skb->truesize;
8196 lro->last_frag = skb;
8197 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8199}
8200
8201/**
8202 * s2io_io_error_detected - called when PCI error is detected
8203 * @pdev: Pointer to PCI device
8204 * @state: The current pci connection state
8205 *
8206 * This function is called after a PCI bus error affecting
8207 * this device has been detected.
8208 */
8209static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8210 pci_channel_state_t state)
8211{
8212 struct net_device *netdev = pci_get_drvdata(pdev);
8213 struct s2io_nic *sp = netdev->priv;
8214
8215 netif_device_detach(netdev);
8216
8217 if (netif_running(netdev)) {
8218 /* Bring down the card, while avoiding PCI I/O */
8219 do_s2io_card_down(sp, 0);
8220 }
8221 pci_disable_device(pdev);
8222
8223 return PCI_ERS_RESULT_NEED_RESET;
8224}
8225
8226/**
8227 * s2io_io_slot_reset - called after the pci bus has been reset.
8228 * @pdev: Pointer to PCI device
8229 *
8230 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
8232 * followed by fixups by BIOS, and has its config space
8233 * set up identically to what it was at cold boot.
8234 */
8235static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8236{
8237 struct net_device *netdev = pci_get_drvdata(pdev);
8238 struct s2io_nic *sp = netdev->priv;
8239
8240 if (pci_enable_device(pdev)) {
8241 printk(KERN_ERR "s2io: "
8242 "Cannot re-enable PCI device after reset.\n");
8243 return PCI_ERS_RESULT_DISCONNECT;
8244 }
8245
8246 pci_set_master(pdev);
8247 s2io_reset(sp);
8248
8249 return PCI_ERS_RESULT_RECOVERED;
8250}
8251
8252/**
8253 * s2io_io_resume - called when traffic can start flowing again.
8254 * @pdev: Pointer to PCI device
8255 *
8256 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
8258 */
8259static void s2io_io_resume(struct pci_dev *pdev)
8260{
8261 struct net_device *netdev = pci_get_drvdata(pdev);
8262 struct s2io_nic *sp = netdev->priv;
8263
8264 if (netif_running(netdev)) {
8265 if (s2io_card_up(sp)) {
8266 printk(KERN_ERR "s2io: "
8267 "Can't bring device back up after reset.\n");
8268 return;
8269 }
8270
8271 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8272 s2io_card_down(sp);
8273 printk(KERN_ERR "s2io: "
			       "Can't restore MAC addr after reset.\n");
8275 return;
8276 }
8277 }
8278
8279 netif_device_attach(netdev);
8280 netif_wake_queue(netdev);
8281}
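
/*
 * The three EEH callbacks above are wired into the PCI core through a
 * struct pci_error_handlers referenced from the driver's pci_driver.
 * A minimal sketch of that wiring (the s2io_driver hookup itself lives
 * elsewhere in this file and is assumed here, not shown):
 */
#if 0
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

/* referenced as  .err_handler = &s2io_err_handler  in s2io_driver */
#endif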