drivers/net/vxge/vxge-main.c - net-next-2.6.git blame at commit "vxge: add support for ethtool firmware flashing"
1/******************************************************************************
2* This software may be used and distributed according to the terms of
3* the GNU General Public License (GPL), incorporated herein by reference.
4* Drivers based on or derived from this code fall under the GPL and must
5* retain the authorship, copyright and license notice. This file is not
6* a complete program and may only be used when the entire operating
7* system is licensed under the GPL.
8* See the file COPYING in this distribution for more information.
9*
926bd900 10* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
703da5a1 11* Virtualized Server Adapter.
926bd900 12* Copyright(c) 2002-2010 Exar Corp.
13*
 14* The loadable module parameters supported by the driver, and a brief
 15* explanation of each variable:
16* vlan_tag_strip:
17* Strip VLAN Tag enable/disable. Instructs the device to remove
18* the VLAN tag from all received tagged frames that are not
19* replicated at the internal L2 switch.
20* 0 - Do not strip the VLAN tag.
21* 1 - Strip the VLAN tag.
22*
23* addr_learn_en:
24* Enable learning the mac address of the guest OS interface in
25* a virtualization environment.
26* 0 - DISABLE
27* 1 - ENABLE
28*
 29* max_config_port:
 30* Maximum number of ports to be supported.
 31* MIN - 1 and MAX - 2
 32*
 33* max_config_vpath:
 34* This configures the maximum number of VPATHs configured for each
 35* device function.
 36* MIN - 1 and MAX - 17
 37*
 38* max_config_dev:
 39* This configures the maximum number of device functions to be enabled.
 40* MIN - 1 and MAX - 17
41*
42******************************************************************************/
43
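/*
 * Editorial note -- a minimal usage sketch, not part of this file: the
 * parameters documented above are normally supplied at module load time,
 * for example:
 *
 *     modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_vpath=4
 *
 * The parameter values shown are illustrative only.
 */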
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/if_vlan.h>
47#include <linux/pci.h>
5a0e3ad6 48#include <linux/slab.h>
2b05e002 49#include <linux/tcp.h>
50#include <net/ip.h>
51#include <linux/netdevice.h>
52#include <linux/etherdevice.h>
e8ac1756 53#include <linux/firmware.h>
54#include "vxge-main.h"
55#include "vxge-reg.h"
56
57MODULE_LICENSE("Dual BSD/GPL");
 58MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
59 "Virtualized Server Adapter");
60
a3aa1884 61static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
62 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
63 PCI_ANY_ID},
64 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
65 PCI_ANY_ID},
66 {0}
67};
68
69MODULE_DEVICE_TABLE(pci, vxge_id_table);
70
71VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
72VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
73VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
74VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
75VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
76VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
77
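/*
 * Editorial note: vpath_selector[n - 1] is used by vxge_get_vpath_no() below
 * as a bit mask (the smallest power-of-two-minus-one that covers n entries)
 * applied to the sum of the TCP source and destination ports when steering
 * transmits across n vpaths; results >= n are clamped to n - 1 there.
 */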
78static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
79 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
80static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
81 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
82module_param_array(bw_percentage, uint, NULL, 0);
83
84static struct vxge_drv_config *driver_config;
85
42821a5b 86static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
87 struct macInfo *mac);
88static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
89 struct macInfo *mac);
90static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
91static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
92static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
93static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
42821a5b 94
95static inline int is_vxge_card_up(struct vxgedev *vdev)
96{
97 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
98}
99
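/*
 * Editorial note on the helper below: vxge_hw_vpath_poll_tx() hands back at
 * most NR_SKB_COMPLETED completed skbs per pass and sets 'more' when further
 * completions remain, so the do/while loop repeats until the fifo is drained;
 * the skbs are freed only after the tx queue lock has been released.
 */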
100static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
101{
102 struct sk_buff **skb_ptr = NULL;
103 struct sk_buff **temp;
104#define NR_SKB_COMPLETED 128
105 struct sk_buff *completed[NR_SKB_COMPLETED];
106 int more;
703da5a1 107
108 do {
109 more = 0;
110 skb_ptr = completed;
111
98f45da2 112 if (__netif_tx_trylock(fifo->txq)) {
113 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
114 NR_SKB_COMPLETED, &more);
98f45da2 115 __netif_tx_unlock(fifo->txq);
ff67df55 116 }
98f45da2 117
118 /* free SKBs */
119 for (temp = completed; temp != skb_ptr; temp++)
120 dev_kfree_skb_irq(*temp);
98f45da2 121 } while (more);
122}
123
124static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
125{
126 int i;
127
128 /* Complete all transmits */
129 for (i = 0; i < vdev->no_of_vpath; i++)
130 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
131}
132
133static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
134{
135 int i;
136 struct vxge_ring *ring;
137
138 /* Complete all receives*/
139 for (i = 0; i < vdev->no_of_vpath; i++) {
140 ring = &vdev->vpaths[i].ring;
141 vxge_hw_vpath_poll_rx(ring->handle);
142 }
143}
144
145/*
146 * vxge_callback_link_up
147 *
148 * This function is called during interrupt context to notify link up state
149 * change.
150 */
42821a5b 151static void
152vxge_callback_link_up(struct __vxge_hw_device *hldev)
153{
154 struct net_device *dev = hldev->ndev;
155 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
156
157 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
158 vdev->ndev->name, __func__, __LINE__);
75f5e1c6 159 netdev_notice(vdev->ndev, "Link Up\n");
160 vdev->stats.link_up++;
161
162 netif_carrier_on(vdev->ndev);
d03848e0 163 netif_tx_wake_all_queues(vdev->ndev);
164
165 vxge_debug_entryexit(VXGE_TRACE,
166 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
167}
168
169/*
170 * vxge_callback_link_down
171 *
172 * This function is called during interrupt context to notify link down state
173 * change.
174 */
42821a5b 175static void
176vxge_callback_link_down(struct __vxge_hw_device *hldev)
177{
178 struct net_device *dev = hldev->ndev;
179 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
180
181 vxge_debug_entryexit(VXGE_TRACE,
182 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
75f5e1c6 183 netdev_notice(vdev->ndev, "Link Down\n");
184
185 vdev->stats.link_down++;
186 netif_carrier_off(vdev->ndev);
d03848e0 187 netif_tx_stop_all_queues(vdev->ndev);
188
189 vxge_debug_entryexit(VXGE_TRACE,
190 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
191}
192
193/*
194 * vxge_rx_alloc
195 *
196 * Allocate SKB.
197 */
198static struct sk_buff*
199vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
200{
201 struct net_device *dev;
202 struct sk_buff *skb;
203 struct vxge_rx_priv *rx_priv;
204
205 dev = ring->ndev;
206 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
207 ring->ndev->name, __func__, __LINE__);
208
209 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
210
211 /* try to allocate skb first. this one may fail */
212 skb = netdev_alloc_skb(dev, skb_size +
213 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
214 if (skb == NULL) {
215 vxge_debug_mem(VXGE_ERR,
216 "%s: out of memory to allocate SKB", dev->name);
217 ring->stats.skb_alloc_fail++;
218 return NULL;
219 }
220
221 vxge_debug_mem(VXGE_TRACE,
222 "%s: %s:%d Skb : 0x%p", ring->ndev->name,
223 __func__, __LINE__, skb);
224
225 skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
226
227 rx_priv->skb = skb;
ea11bbe0 228 rx_priv->skb_data = NULL;
229 rx_priv->data_size = skb_size;
230 vxge_debug_entryexit(VXGE_TRACE,
231 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
232
233 return skb;
234}
235
236/*
237 * vxge_rx_map
238 */
239static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
240{
241 struct vxge_rx_priv *rx_priv;
242 dma_addr_t dma_addr;
243
244 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
245 ring->ndev->name, __func__, __LINE__);
246 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
247
248 rx_priv->skb_data = rx_priv->skb->data;
249 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
250 rx_priv->data_size, PCI_DMA_FROMDEVICE);
251
fa15e99b 252 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
253 ring->stats.pci_map_fail++;
254 return -EIO;
255 }
256 vxge_debug_mem(VXGE_TRACE,
257 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
258 ring->ndev->name, __func__, __LINE__,
259 (unsigned long long)dma_addr);
260 vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
261
262 rx_priv->data_dma = dma_addr;
263 vxge_debug_entryexit(VXGE_TRACE,
264 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
265
266 return 0;
267}
268
269/*
270 * vxge_rx_initial_replenish
271 * Allocation of RxD as an initial replenish procedure.
272 */
273static enum vxge_hw_status
274vxge_rx_initial_replenish(void *dtrh, void *userdata)
275{
276 struct vxge_ring *ring = (struct vxge_ring *)userdata;
277 struct vxge_rx_priv *rx_priv;
278
279 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
280 ring->ndev->name, __func__, __LINE__);
281 if (vxge_rx_alloc(dtrh, ring,
282 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
283 return VXGE_HW_FAIL;
284
285 if (vxge_rx_map(dtrh, ring)) {
286 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
287 dev_kfree_skb(rx_priv->skb);
288
289 return VXGE_HW_FAIL;
290 }
291 vxge_debug_entryexit(VXGE_TRACE,
292 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
293
294 return VXGE_HW_OK;
295}
296
297static inline void
298vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
299 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
300{
301
302 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
303 ring->ndev->name, __func__, __LINE__);
304 skb_record_rx_queue(skb, ring->driver_id);
305 skb->protocol = eth_type_trans(skb, ring->ndev);
306
307 ring->stats.rx_frms++;
308 ring->stats.rx_bytes += pkt_length;
309
310 if (skb->pkt_type == PACKET_MULTICAST)
311 ring->stats.rx_mcast++;
312
313 vxge_debug_rx(VXGE_TRACE,
314 "%s: %s:%d skb protocol = %d",
315 ring->ndev->name, __func__, __LINE__, skb->protocol);
316
317 if (ring->gro_enable) {
318 if (ring->vlgrp && ext_info->vlan &&
319 (ring->vlan_tag_strip ==
320 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
a5d165b5 321 vlan_gro_receive(ring->napi_p, ring->vlgrp,
322 ext_info->vlan, skb);
323 else
a5d165b5 324 napi_gro_receive(ring->napi_p, skb);
325 } else {
326 if (ring->vlgrp && vlan &&
327 (ring->vlan_tag_strip ==
328 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
329 vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
330 else
331 netif_receive_skb(skb);
332 }
333 vxge_debug_entryexit(VXGE_TRACE,
334 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
335}
336
337static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
338 struct vxge_rx_priv *rx_priv)
339{
340 pci_dma_sync_single_for_device(ring->pdev,
341 rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
342
343 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
344 vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
345}
346
347static inline void vxge_post(int *dtr_cnt, void **first_dtr,
348 void *post_dtr, struct __vxge_hw_ring *ringh)
349{
350 int dtr_count = *dtr_cnt;
351 if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
352 if (*first_dtr)
353 vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
354 *first_dtr = post_dtr;
355 } else
356 vxge_hw_ring_rxd_post_post(ringh, post_dtr);
357 dtr_count++;
358 *dtr_cnt = dtr_count;
359}
360
361/*
362 * vxge_rx_1b_compl
363 *
364 * If the interrupt is because of a received frame or if the receive ring
 365 * contains fresh, as-yet unprocessed frames, this function is called.
366 */
42821a5b 367static enum vxge_hw_status
368vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
369 u8 t_code, void *userdata)
370{
371 struct vxge_ring *ring = (struct vxge_ring *)userdata;
372 struct net_device *dev = ring->ndev;
373 unsigned int dma_sizes;
374 void *first_dtr = NULL;
375 int dtr_cnt = 0;
376 int data_size;
377 dma_addr_t data_dma;
378 int pkt_length;
379 struct sk_buff *skb;
380 struct vxge_rx_priv *rx_priv;
381 struct vxge_hw_ring_rxd_info ext_info;
382 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
383 ring->ndev->name, __func__, __LINE__);
384 ring->pkts_processed = 0;
385
3363276f 386 vxge_hw_ring_replenish(ringh);
387
388 do {
3f23e436 389 prefetch((char *)dtr + L1_CACHE_BYTES);
390 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
391 skb = rx_priv->skb;
392 data_size = rx_priv->data_size;
393 data_dma = rx_priv->data_dma;
ea11bbe0 394 prefetch(rx_priv->skb_data);
395
396 vxge_debug_rx(VXGE_TRACE,
397 "%s: %s:%d skb = 0x%p",
398 ring->ndev->name, __func__, __LINE__, skb);
399
400 vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
401 pkt_length = dma_sizes;
402
403 pkt_length -= ETH_FCS_LEN;
404
405 vxge_debug_rx(VXGE_TRACE,
406 "%s: %s:%d Packet Length = %d",
407 ring->ndev->name, __func__, __LINE__, pkt_length);
408
409 vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
410
411 /* check skb validity */
412 vxge_assert(skb);
413
414 prefetch((char *)skb + L1_CACHE_BYTES);
415 if (unlikely(t_code)) {
416
417 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
418 VXGE_HW_OK) {
419
420 ring->stats.rx_errors++;
421 vxge_debug_rx(VXGE_TRACE,
422 "%s: %s :%d Rx T_code is %d",
423 ring->ndev->name, __func__,
424 __LINE__, t_code);
425
426 /* If the t_code is not supported and if the
427 * t_code is other than 0x5 (unparseable packet
 428 * such as an unknown IPv6 header), drop it.
429 */
430 vxge_re_pre_post(dtr, ring, rx_priv);
431
432 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
433 ring->stats.rx_dropped++;
434 continue;
435 }
436 }
437
438 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
439
440 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
441
442 if (!vxge_rx_map(dtr, ring)) {
443 skb_put(skb, pkt_length);
444
445 pci_unmap_single(ring->pdev, data_dma,
446 data_size, PCI_DMA_FROMDEVICE);
447
448 vxge_hw_ring_rxd_pre_post(ringh, dtr);
449 vxge_post(&dtr_cnt, &first_dtr, dtr,
450 ringh);
451 } else {
452 dev_kfree_skb(rx_priv->skb);
453 rx_priv->skb = skb;
454 rx_priv->data_size = data_size;
455 vxge_re_pre_post(dtr, ring, rx_priv);
456
457 vxge_post(&dtr_cnt, &first_dtr, dtr,
458 ringh);
459 ring->stats.rx_dropped++;
460 break;
461 }
462 } else {
463 vxge_re_pre_post(dtr, ring, rx_priv);
464
465 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
466 ring->stats.rx_dropped++;
467 break;
468 }
469 } else {
470 struct sk_buff *skb_up;
471
472 skb_up = netdev_alloc_skb(dev, pkt_length +
473 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
474 if (skb_up != NULL) {
475 skb_reserve(skb_up,
476 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
477
478 pci_dma_sync_single_for_cpu(ring->pdev,
479 data_dma, data_size,
480 PCI_DMA_FROMDEVICE);
481
482 vxge_debug_mem(VXGE_TRACE,
483 "%s: %s:%d skb_up = %p",
484 ring->ndev->name, __func__,
485 __LINE__, skb);
486 memcpy(skb_up->data, skb->data, pkt_length);
487
488 vxge_re_pre_post(dtr, ring, rx_priv);
489
490 vxge_post(&dtr_cnt, &first_dtr, dtr,
491 ringh);
492 /* will netif_rx small SKB instead */
493 skb = skb_up;
494 skb_put(skb, pkt_length);
495 } else {
496 vxge_re_pre_post(dtr, ring, rx_priv);
497
498 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
499 vxge_debug_rx(VXGE_ERR,
500 "%s: vxge_rx_1b_compl: out of "
501 "memory", dev->name);
502 ring->stats.skb_alloc_fail++;
503 break;
504 }
505 }
506
507 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
508 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
509 ring->rx_csum && /* Offload Rx side CSUM */
510 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
511 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
512 skb->ip_summed = CHECKSUM_UNNECESSARY;
513 else
bc8acf2c 514 skb_checksum_none_assert(skb);
703da5a1 515
516 /* rth_hash_type and rth_it_hit are non-zero regardless of
517 * whether rss is enabled. Only the rth_value is zero/non-zero
518 * if rss is disabled/enabled, so key off of that.
519 */
520 if (ext_info.rth_value)
521 skb->rxhash = ext_info.rth_value;
522
523 vxge_rx_complete(ring, skb, ext_info.vlan,
524 pkt_length, &ext_info);
525
526 ring->budget--;
527 ring->pkts_processed++;
528 if (!ring->budget)
529 break;
530
531 } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
532 &t_code) == VXGE_HW_OK);
533
534 if (first_dtr)
535 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
536
537 vxge_debug_entryexit(VXGE_TRACE,
538 "%s:%d Exiting...",
539 __func__, __LINE__);
540 return VXGE_HW_OK;
541}
542
543/*
544 * vxge_xmit_compl
545 *
 546 * If an interrupt was raised to indicate DMA completion of the Tx packet,
 547 * this function is called. It identifies the last TxD whose buffer was
 548 * freed and frees all skbs whose data have already been DMA'ed into the
 549 * NIC's internal memory.
550 */
42821a5b 551static enum vxge_hw_status
552vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
553 enum vxge_hw_fifo_tcode t_code, void *userdata,
ff67df55 554 struct sk_buff ***skb_ptr, int nr_skb, int *more)
555{
556 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
ff67df55 557 struct sk_buff *skb, **done_skb = *skb_ptr;
558 int pkt_cnt = 0;
559
560 vxge_debug_entryexit(VXGE_TRACE,
561 "%s:%d Entered....", __func__, __LINE__);
562
563 do {
564 int frg_cnt;
565 skb_frag_t *frag;
566 int i = 0, j;
567 struct vxge_tx_priv *txd_priv =
568 vxge_hw_fifo_txdl_private_get(dtr);
569
570 skb = txd_priv->skb;
571 frg_cnt = skb_shinfo(skb)->nr_frags;
572 frag = &skb_shinfo(skb)->frags[0];
573
574 vxge_debug_tx(VXGE_TRACE,
575 "%s: %s:%d fifo_hw = %p dtr = %p "
576 "tcode = 0x%x", fifo->ndev->name, __func__,
577 __LINE__, fifo_hw, dtr, t_code);
578 /* check skb validity */
579 vxge_assert(skb);
580 vxge_debug_tx(VXGE_TRACE,
581 "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
582 fifo->ndev->name, __func__, __LINE__,
583 skb, txd_priv, frg_cnt);
584 if (unlikely(t_code)) {
585 fifo->stats.tx_errors++;
586 vxge_debug_tx(VXGE_ERR,
587 "%s: tx: dtr %p completed due to "
588 "error t_code %01x", fifo->ndev->name,
589 dtr, t_code);
590 vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
591 }
592
593 /* for unfragmented skb */
594 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
595 skb_headlen(skb), PCI_DMA_TODEVICE);
596
597 for (j = 0; j < frg_cnt; j++) {
598 pci_unmap_page(fifo->pdev,
599 txd_priv->dma_buffers[i++],
600 frag->size, PCI_DMA_TODEVICE);
601 frag += 1;
602 }
603
604 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
605
606 /* Updating the statistics block */
607 fifo->stats.tx_frms++;
608 fifo->stats.tx_bytes += skb->len;
609
610 *done_skb++ = skb;
611
612 if (--nr_skb <= 0) {
613 *more = 1;
614 break;
615 }
616
617 pkt_cnt++;
618 if (pkt_cnt > fifo->indicate_max_pkts)
619 break;
620
621 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
622 &dtr, &t_code) == VXGE_HW_OK);
623
ff67df55 624 *skb_ptr = done_skb;
625 if (netif_tx_queue_stopped(fifo->txq))
626 netif_tx_wake_queue(fifo->txq);
703da5a1 627
628 vxge_debug_entryexit(VXGE_TRACE,
629 "%s: %s:%d Exiting...",
630 fifo->ndev->name, __func__, __LINE__);
631 return VXGE_HW_OK;
632}
633
28679751 634/* select a vpath to transmit the packet */
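/*
 * Editorial worked example (hypothetical values): with 4 vpaths,
 * vpath_selector[3] == 3, so a TCP flow with source port 1000 and
 * destination port 2000 maps to (1000 + 2000) & 3 == 0, i.e. vpath 0.
 * Non-TCP or fragmented IP traffic falls through with counter == 0.
 */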
98f45da2 635static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
636{
637 u16 queue_len, counter = 0;
638 if (skb->protocol == htons(ETH_P_IP)) {
639 struct iphdr *ip;
640 struct tcphdr *th;
641
642 ip = ip_hdr(skb);
643
644 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
645 th = (struct tcphdr *)(((unsigned char *)ip) +
646 ip->ihl*4);
647
648 queue_len = vdev->no_of_vpath;
649 counter = (ntohs(th->source) +
650 ntohs(th->dest)) &
651 vdev->vpath_selector[queue_len - 1];
652 if (counter >= queue_len)
653 counter = queue_len - 1;
654 }
655 }
656 return counter;
657}
658
659static enum vxge_hw_status vxge_search_mac_addr_in_list(
660 struct vxge_vpath *vpath, u64 del_mac)
661{
662 struct list_head *entry, *next;
663 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
664 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
665 return TRUE;
666 }
667 return FALSE;
668}
669
670static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
671{
672 struct macInfo mac_info;
673 u8 *mac_address = NULL;
674 u64 mac_addr = 0, vpath_vector = 0;
675 int vpath_idx = 0;
676 enum vxge_hw_status status = VXGE_HW_OK;
677 struct vxge_vpath *vpath = NULL;
678 struct __vxge_hw_device *hldev;
679
680 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
681
682 mac_address = (u8 *)&mac_addr;
683 memcpy(mac_address, mac_header, ETH_ALEN);
684
685 /* Is this mac address already in the list? */
686 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
687 vpath = &vdev->vpaths[vpath_idx];
688 if (vxge_search_mac_addr_in_list(vpath, mac_addr))
689 return vpath_idx;
690 }
691
692 memset(&mac_info, 0, sizeof(struct macInfo));
693 memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
694
695 /* Any vpath has room to add mac address to its da table? */
696 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
697 vpath = &vdev->vpaths[vpath_idx];
698 if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
699 /* Add this mac address to this vpath */
700 mac_info.vpath_no = vpath_idx;
701 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
702 status = vxge_add_mac_addr(vdev, &mac_info);
703 if (status != VXGE_HW_OK)
704 return -EPERM;
705 return vpath_idx;
706 }
707 }
708
709 mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
710 vpath_idx = 0;
711 mac_info.vpath_no = vpath_idx;
712 /* Is the first vpath already selected as catch-basin ? */
713 vpath = &vdev->vpaths[vpath_idx];
714 if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
715 /* Add this mac address to this vpath */
716 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
717 return -EPERM;
718 return vpath_idx;
719 }
720
721 /* Select first vpath as catch-basin */
722 vpath_vector = vxge_mBIT(vpath->device_id);
723 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
724 vxge_hw_mgmt_reg_type_mrpcim,
725 0,
726 (ulong)offsetof(
727 struct vxge_hw_mrpcim_reg,
728 rts_mgr_cbasin_cfg),
729 vpath_vector);
730 if (status != VXGE_HW_OK) {
731 vxge_debug_tx(VXGE_ERR,
732 "%s: Unable to set the vpath-%d in catch-basin mode",
733 VXGE_DRIVER_NAME, vpath->device_id);
734 return -EPERM;
735 }
736
737 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
738 return -EPERM;
739
740 return vpath_idx;
741}
742
743/**
744 * vxge_xmit
745 * @skb : the socket buffer containing the Tx data.
746 * @dev : device pointer.
747 *
748 * This function is the Tx entry point of the driver. Neterion NIC supports
749 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
703da5a1 750*/
61357325 751static netdev_tx_t
752vxge_xmit(struct sk_buff *skb, struct net_device *dev)
753{
754 struct vxge_fifo *fifo = NULL;
755 void *dtr_priv;
756 void *dtr = NULL;
757 struct vxgedev *vdev = NULL;
758 enum vxge_hw_status status;
759 int frg_cnt, first_frg_len;
760 skb_frag_t *frag;
761 int i = 0, j = 0, avail;
762 u64 dma_pointer;
763 struct vxge_tx_priv *txdl_priv = NULL;
764 struct __vxge_hw_fifo *fifo_hw;
703da5a1 765 int offload_type;
703da5a1 766 int vpath_no = 0;
767
768 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
769 dev->name, __func__, __LINE__);
770
771 /* A buffer with no data will be dropped */
772 if (unlikely(skb->len <= 0)) {
773 vxge_debug_tx(VXGE_ERR,
774 "%s: Buffer has no data..", dev->name);
775 dev_kfree_skb(skb);
776 return NETDEV_TX_OK;
777 }
778
779 vdev = (struct vxgedev *)netdev_priv(dev);
780
781 if (unlikely(!is_vxge_card_up(vdev))) {
782 vxge_debug_tx(VXGE_ERR,
783 "%s: vdev not initialized", dev->name);
784 dev_kfree_skb(skb);
785 return NETDEV_TX_OK;
786 }
787
788 if (vdev->config.addr_learn_en) {
789 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
790 if (vpath_no == -EPERM) {
791 vxge_debug_tx(VXGE_ERR,
792 "%s: Failed to store the mac address",
793 dev->name);
794 dev_kfree_skb(skb);
795 return NETDEV_TX_OK;
796 }
797 }
798
799 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
800 vpath_no = skb_get_queue_mapping(skb);
801 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
98f45da2 802 vpath_no = vxge_get_vpath_no(vdev, skb);
803
804 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
805
806 if (vpath_no >= vdev->no_of_vpath)
807 vpath_no = 0;
808
809 fifo = &vdev->vpaths[vpath_no].fifo;
810 fifo_hw = fifo->handle;
811
98f45da2 812 if (netif_tx_queue_stopped(fifo->txq))
d03848e0 813 return NETDEV_TX_BUSY;
d03848e0 814
815 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
816 if (avail == 0) {
817 vxge_debug_tx(VXGE_ERR,
818 "%s: No free TXDs available", dev->name);
819 fifo->stats.txd_not_free++;
98f45da2 820 goto _exit0;
821 }
822
823 /* Last TXD? Stop tx queue to avoid dropping packets. TX
824 * completion will resume the queue.
825 */
826 if (avail == 1)
98f45da2 827 netif_tx_stop_queue(fifo->txq);
4403b371 828
829 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
830 if (unlikely(status != VXGE_HW_OK)) {
831 vxge_debug_tx(VXGE_ERR,
 832 "%s: Out of descriptors.", dev->name);
833 fifo->stats.txd_out_of_desc++;
98f45da2 834 goto _exit0;
835 }
836
837 vxge_debug_tx(VXGE_TRACE,
838 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
839 dev->name, __func__, __LINE__,
840 fifo_hw, dtr, dtr_priv);
841
eab6d18d 842 if (vlan_tx_tag_present(skb)) {
843 u16 vlan_tag = vlan_tx_tag_get(skb);
844 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
845 }
846
847 first_frg_len = skb_headlen(skb);
848
849 dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
850 PCI_DMA_TODEVICE);
851
852 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
853 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
703da5a1 854 fifo->stats.pci_map_fail++;
98f45da2 855 goto _exit0;
856 }
857
858 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
859 txdl_priv->skb = skb;
860 txdl_priv->dma_buffers[j] = dma_pointer;
861
862 frg_cnt = skb_shinfo(skb)->nr_frags;
863 vxge_debug_tx(VXGE_TRACE,
864 "%s: %s:%d skb = %p txdl_priv = %p "
865 "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
866 __func__, __LINE__, skb, txdl_priv,
867 frg_cnt, (unsigned long long)dma_pointer);
868
869 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
870 first_frg_len);
871
872 frag = &skb_shinfo(skb)->frags[0];
873 for (i = 0; i < frg_cnt; i++) {
874 /* ignore 0 length fragment */
875 if (!frag->size)
876 continue;
877
98f45da2 878 dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
879 frag->page_offset, frag->size,
880 PCI_DMA_TODEVICE);
881
882 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
98f45da2 883 goto _exit2;
884 vxge_debug_tx(VXGE_TRACE,
885 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
886 dev->name, __func__, __LINE__, i,
887 (unsigned long long)dma_pointer);
888
889 txdl_priv->dma_buffers[j] = dma_pointer;
890 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
891 frag->size);
892 frag += 1;
893 }
894
895 offload_type = vxge_offload_type(skb);
896
897 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
898 int mss = vxge_tcp_mss(skb);
899 if (mss) {
98f45da2 900 vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
901 dev->name, __func__, __LINE__, mss);
902 vxge_hw_fifo_txdl_mss_set(dtr, mss);
903 } else {
904 vxge_assert(skb->len <=
905 dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
906 vxge_assert(0);
907 goto _exit1;
908 }
909 }
910
911 if (skb->ip_summed == CHECKSUM_PARTIAL)
912 vxge_hw_fifo_txdl_cksum_set_bits(dtr,
913 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
914 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
915 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
916
917 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
703da5a1 918
919 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
920 dev->name, __func__, __LINE__);
6ed10654 921 return NETDEV_TX_OK;
703da5a1 922
98f45da2 923_exit2:
703da5a1 924 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
925_exit1:
926 j = 0;
927 frag = &skb_shinfo(skb)->frags[0];
928
929 pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
930 skb_headlen(skb), PCI_DMA_TODEVICE);
931
932 for (; j < i; j++) {
933 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
934 frag->size, PCI_DMA_TODEVICE);
935 frag += 1;
936 }
937
938 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
939_exit0:
940 netif_tx_stop_queue(fifo->txq);
703da5a1 941 dev_kfree_skb(skb);
703da5a1 942
6ed10654 943 return NETDEV_TX_OK;
944}
945
946/*
947 * vxge_rx_term
948 *
949 * Function will be called by hw function to abort all outstanding receive
950 * descriptors.
951 */
952static void
953vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
954{
955 struct vxge_ring *ring = (struct vxge_ring *)userdata;
956 struct vxge_rx_priv *rx_priv =
957 vxge_hw_ring_rxd_private_get(dtrh);
958
959 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
960 ring->ndev->name, __func__, __LINE__);
961 if (state != VXGE_HW_RXD_STATE_POSTED)
962 return;
963
964 pci_unmap_single(ring->pdev, rx_priv->data_dma,
965 rx_priv->data_size, PCI_DMA_FROMDEVICE);
966
967 dev_kfree_skb(rx_priv->skb);
ea11bbe0 968 rx_priv->skb_data = NULL;
969
970 vxge_debug_entryexit(VXGE_TRACE,
971 "%s: %s:%d Exiting...",
972 ring->ndev->name, __func__, __LINE__);
973}
974
975/*
976 * vxge_tx_term
977 *
978 * Function will be called to abort all outstanding tx descriptors
979 */
980static void
981vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
982{
983 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
984 skb_frag_t *frag;
985 int i = 0, j, frg_cnt;
986 struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
987 struct sk_buff *skb = txd_priv->skb;
988
989 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
990
991 if (state != VXGE_HW_TXDL_STATE_POSTED)
992 return;
993
994 /* check skb validity */
995 vxge_assert(skb);
996 frg_cnt = skb_shinfo(skb)->nr_frags;
997 frag = &skb_shinfo(skb)->frags[0];
998
999 /* for unfragmented skb */
1000 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
1001 skb_headlen(skb), PCI_DMA_TODEVICE);
1002
1003 for (j = 0; j < frg_cnt; j++) {
1004 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
1005 frag->size, PCI_DMA_TODEVICE);
1006 frag += 1;
1007 }
1008
1009 dev_kfree_skb(skb);
1010
1011 vxge_debug_entryexit(VXGE_TRACE,
1012 "%s:%d Exiting...", __func__, __LINE__);
1013}
1014
1015/**
1016 * vxge_set_multicast
1017 * @dev: pointer to the device structure
1018 *
1019 * Entry point for multicast address enable/disable
1020 * This function is a driver entry point which gets called by the kernel
1021 * whenever multicast addresses must be enabled/disabled. This also gets
 1022 * called to set/reset promiscuous mode. Depending on the device flags, we
 1023 * determine whether multicast addresses must be enabled or promiscuous mode
 1024 * is to be disabled, etc.
1025 */
1026static void vxge_set_multicast(struct net_device *dev)
1027{
22bedad3 1028 struct netdev_hw_addr *ha;
1029 struct vxgedev *vdev;
1030 int i, mcast_cnt = 0;
1031 struct __vxge_hw_device *hldev;
1032 struct vxge_vpath *vpath;
1033 enum vxge_hw_status status = VXGE_HW_OK;
1034 struct macInfo mac_info;
1035 int vpath_idx = 0;
1036 struct vxge_mac_addrs *mac_entry;
1037 struct list_head *list_head;
1038 struct list_head *entry, *next;
1039 u8 *mac_address = NULL;
1040
1041 vxge_debug_entryexit(VXGE_TRACE,
1042 "%s:%d", __func__, __LINE__);
1043
1044 vdev = (struct vxgedev *)netdev_priv(dev);
1045 hldev = (struct __vxge_hw_device *)vdev->devh;
1046
1047 if (unlikely(!is_vxge_card_up(vdev)))
1048 return;
1049
1050 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1051 for (i = 0; i < vdev->no_of_vpath; i++) {
1052 vpath = &vdev->vpaths[i];
1053 vxge_assert(vpath->is_open);
1054 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1055 if (status != VXGE_HW_OK)
1056 vxge_debug_init(VXGE_ERR, "failed to enable "
1057 "multicast, status %d", status);
1058 vdev->all_multi_flg = 1;
1059 }
7adf7d1b 1060 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
703da5a1 1061 for (i = 0; i < vdev->no_of_vpath; i++) {
1062 vpath = &vdev->vpaths[i];
1063 vxge_assert(vpath->is_open);
1064 status = vxge_hw_vpath_mcast_disable(vpath->handle);
1065 if (status != VXGE_HW_OK)
1066 vxge_debug_init(VXGE_ERR, "failed to disable "
1067 "multicast, status %d", status);
1068 vdev->all_multi_flg = 0;
1069 }
1070 }
1071
1072
1073 if (!vdev->config.addr_learn_en) {
1074 for (i = 0; i < vdev->no_of_vpath; i++) {
1075 vpath = &vdev->vpaths[i];
1076 vxge_assert(vpath->is_open);
1077
1078 if (dev->flags & IFF_PROMISC)
703da5a1 1079 status = vxge_hw_vpath_promisc_enable(
1080 vpath->handle);
1081 else
703da5a1 1082 status = vxge_hw_vpath_promisc_disable(
1083 vpath->handle);
1084 if (status != VXGE_HW_OK)
1085 vxge_debug_init(VXGE_ERR, "failed to %s promisc"
1086 ", status %d", dev->flags&IFF_PROMISC ?
1087 "enable" : "disable", status);
1088 }
1089 }
1090
1091 memset(&mac_info, 0, sizeof(struct macInfo));
1092 /* Update individual M_CAST address list */
4cd24eaf 1093 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1094 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1095 list_head = &vdev->vpaths[0].mac_addr_list;
4cd24eaf 1096 if ((netdev_mc_count(dev) +
1097 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1098 vdev->vpaths[0].max_mac_addr_cnt)
1099 goto _set_all_mcast;
1100
1101 /* Delete previous MC's */
1102 for (i = 0; i < mcast_cnt; i++) {
703da5a1 1103 list_for_each_safe(entry, next, list_head) {
1104 mac_entry = (struct vxge_mac_addrs *) entry;
1105 /* Copy the mac address to delete */
1106 mac_address = (u8 *)&mac_entry->macaddr;
1107 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1108
1109 /* Is this a multicast address */
1110 if (0x01 & mac_info.macaddr[0]) {
1111 for (vpath_idx = 0; vpath_idx <
1112 vdev->no_of_vpath;
1113 vpath_idx++) {
1114 mac_info.vpath_no = vpath_idx;
1115 status = vxge_del_mac_addr(
1116 vdev,
1117 &mac_info);
1118 }
1119 }
1120 }
1121 }
1122
1123 /* Add new ones */
1124 netdev_for_each_mc_addr(ha, dev) {
1125 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
1126 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1127 vpath_idx++) {
1128 mac_info.vpath_no = vpath_idx;
1129 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1130 status = vxge_add_mac_addr(vdev, &mac_info);
1131 if (status != VXGE_HW_OK) {
1132 vxge_debug_init(VXGE_ERR,
 1133 "%s:%d Setting individual "
1134 "multicast address failed",
1135 __func__, __LINE__);
1136 goto _set_all_mcast;
1137 }
1138 }
1139 }
1140
1141 return;
1142_set_all_mcast:
1143 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1144 /* Delete previous MC's */
1145 for (i = 0; i < mcast_cnt; i++) {
703da5a1 1146 list_for_each_safe(entry, next, list_head) {
1147 mac_entry = (struct vxge_mac_addrs *) entry;
1148 /* Copy the mac address to delete */
1149 mac_address = (u8 *)&mac_entry->macaddr;
1150 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1151
1152 /* Is this a multicast address */
1153 if (0x01 & mac_info.macaddr[0])
1154 break;
1155 }
1156
1157 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1158 vpath_idx++) {
1159 mac_info.vpath_no = vpath_idx;
1160 status = vxge_del_mac_addr(vdev, &mac_info);
1161 }
1162 }
1163
1164 /* Enable all multicast */
1165 for (i = 0; i < vdev->no_of_vpath; i++) {
1166 vpath = &vdev->vpaths[i];
1167 vxge_assert(vpath->is_open);
1168
1169 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1170 if (status != VXGE_HW_OK) {
1171 vxge_debug_init(VXGE_ERR,
1172 "%s:%d Enabling all multicasts failed",
1173 __func__, __LINE__);
1174 }
1175 vdev->all_multi_flg = 1;
1176 }
1177 dev->flags |= IFF_ALLMULTI;
1178 }
1179
1180 vxge_debug_entryexit(VXGE_TRACE,
1181 "%s:%d Exiting...", __func__, __LINE__);
1182}
1183
1184/**
1185 * vxge_set_mac_addr
1186 * @dev: pointer to the device structure
1187 *
1188 * Update entry "0" (default MAC addr)
1189 */
1190static int vxge_set_mac_addr(struct net_device *dev, void *p)
1191{
1192 struct sockaddr *addr = p;
1193 struct vxgedev *vdev;
1194 struct __vxge_hw_device *hldev;
1195 enum vxge_hw_status status = VXGE_HW_OK;
1196 struct macInfo mac_info_new, mac_info_old;
1197 int vpath_idx = 0;
1198
1199 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1200
1201 vdev = (struct vxgedev *)netdev_priv(dev);
1202 hldev = vdev->devh;
1203
1204 if (!is_valid_ether_addr(addr->sa_data))
1205 return -EINVAL;
1206
1207 memset(&mac_info_new, 0, sizeof(struct macInfo));
1208 memset(&mac_info_old, 0, sizeof(struct macInfo));
1209
1210 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
1211 __func__, __LINE__);
1212
1213 /* Get the old address */
1214 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
1215
1216 /* Copy the new address */
1217 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
1218
1219 /* First delete the old mac address from all the vpaths
1220 as we can't specify the index while adding new mac address */
1221 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1222 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1223 if (!vpath->is_open) {
 1224 /* This can happen when this interface is added to/removed
 1225 from the bonding interface. Delete this station address
1226 from the linked list */
1227 vxge_mac_list_del(vpath, &mac_info_old);
1228
1229 /* Add this new address to the linked list
1230 for later restoring */
1231 vxge_mac_list_add(vpath, &mac_info_new);
1232
1233 continue;
1234 }
1235 /* Delete the station address */
1236 mac_info_old.vpath_no = vpath_idx;
1237 status = vxge_del_mac_addr(vdev, &mac_info_old);
1238 }
1239
1240 if (unlikely(!is_vxge_card_up(vdev))) {
1241 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1242 return VXGE_HW_OK;
1243 }
1244
1245 /* Set this mac address to all the vpaths */
1246 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1247 mac_info_new.vpath_no = vpath_idx;
1248 mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1249 status = vxge_add_mac_addr(vdev, &mac_info_new);
1250 if (status != VXGE_HW_OK)
1251 return -EINVAL;
1252 }
1253
1254 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1255
1256 return status;
1257}
1258
1259/*
1260 * vxge_vpath_intr_enable
1261 * @vdev: pointer to vdev
1262 * @vp_id: vpath for which to enable the interrupts
1263 *
1264 * Enables the interrupts for the vpath
1265*/
42821a5b 1266static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1267{
1268 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1269 int msix_id = 0;
1270 int tim_msix_id[4] = {0, 1, 0, 0};
1271 int alarm_msix_id = VXGE_ALARM_MSIX_ID;
1272
1273 vxge_hw_vpath_intr_enable(vpath->handle);
1274
1275 if (vdev->config.intr_type == INTA)
1276 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1277 else {
1278 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1279 alarm_msix_id);
1280
b59c9457 1281 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1282 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1283 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1284
1285 /* enable the alarm vector */
1286 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1287 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1288 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1289 }
1290}
1291
1292/*
1293 * vxge_vpath_intr_disable
1294 * @vdev: pointer to vdev
1295 * @vp_id: vpath for which to disable the interrupts
1296 *
1297 * Disables the interrupts for the vpath
1298*/
42821a5b 1299static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1300{
1301 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
4d2a5b40 1302 struct __vxge_hw_device *hldev;
1303 int msix_id;
1304
1305 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
1306
1307 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1308
1309 vxge_hw_vpath_intr_disable(vpath->handle);
1310
1311 if (vdev->config.intr_type == INTA)
1312 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1313 else {
b59c9457 1314 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1315 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1316 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1317
1318 /* disable the alarm vector */
1319 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1320 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
1321 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1322 }
1323}
1324
1325/*
1326 * vxge_reset_vpath
1327 * @vdev: pointer to vdev
1328 * @vp_id: vpath to reset
1329 *
1330 * Resets the vpath
1331*/
1332static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1333{
1334 enum vxge_hw_status status = VXGE_HW_OK;
7adf7d1b 1335 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1336 int ret = 0;
1337
1338 /* check if device is down already */
1339 if (unlikely(!is_vxge_card_up(vdev)))
1340 return 0;
1341
1342 /* is device reset already scheduled */
1343 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1344 return 0;
1345
1346 if (vpath->handle) {
1347 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
703da5a1 1348 if (is_vxge_card_up(vdev) &&
7adf7d1b 1349 vxge_hw_vpath_recover_from_reset(vpath->handle)
1350 != VXGE_HW_OK) {
1351 vxge_debug_init(VXGE_ERR,
 1352 "vxge_hw_vpath_recover_from_reset "
1353 "failed for vpath:%d", vp_id);
1354 return status;
1355 }
1356 } else {
1357 vxge_debug_init(VXGE_ERR,
 1358 "vxge_hw_vpath_reset failed for "
1359 "vpath:%d", vp_id);
1360 return status;
1361 }
1362 } else
1363 return VXGE_HW_FAIL;
1364
1365 vxge_restore_vpath_mac_addr(vpath);
1366 vxge_restore_vpath_vid_table(vpath);
1367
1368 /* Enable all broadcast */
1369 vxge_hw_vpath_bcast_enable(vpath->handle);
1370
1371 /* Enable all multicast */
1372 if (vdev->all_multi_flg) {
1373 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1374 if (status != VXGE_HW_OK)
1375 vxge_debug_init(VXGE_ERR,
1376 "%s:%d Enabling multicast failed",
1377 __func__, __LINE__);
1378 }
1379
1380 /* Enable the interrupts */
1381 vxge_vpath_intr_enable(vdev, vp_id);
1382
1383 smp_wmb();
1384
1385 /* Enable the flow of traffic through the vpath */
7adf7d1b 1386 vxge_hw_vpath_enable(vpath->handle);
1387
1388 smp_wmb();
1389 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
1390 vpath->ring.last_status = VXGE_HW_OK;
1391
1392 /* Vpath reset done */
1393 clear_bit(vp_id, &vdev->vp_reset);
1394
1395 /* Start the vpath queue */
1396 if (netif_tx_queue_stopped(vpath->fifo.txq))
1397 netif_tx_wake_queue(vpath->fifo.txq);
1398
1399 return ret;
1400}
1401
1402static int do_vxge_reset(struct vxgedev *vdev, int event)
1403{
1404 enum vxge_hw_status status;
1405 int ret = 0, vp_id, i;
1406
1407 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1408
1409 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1410 /* check if device is down already */
1411 if (unlikely(!is_vxge_card_up(vdev)))
1412 return 0;
1413
1414 /* is reset already scheduled */
1415 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1416 return 0;
1417 }
1418
1419 if (event == VXGE_LL_FULL_RESET) {
1420 /* wait for all the vpath reset to complete */
1421 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1422 while (test_bit(vp_id, &vdev->vp_reset))
1423 msleep(50);
1424 }
1425
1426 /* if execution mode is set to debug, don't reset the adapter */
1427 if (unlikely(vdev->exec_mode)) {
1428 vxge_debug_init(VXGE_ERR,
1429 "%s: execution mode is debug, returning..",
1430 vdev->ndev->name);
1431 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1432 netif_tx_stop_all_queues(vdev->ndev);
1433 return 0;
1434 }
1435 }
1436
1437 if (event == VXGE_LL_FULL_RESET) {
4d2a5b40 1438 vxge_hw_device_wait_receive_idle(vdev->devh);
1439 vxge_hw_device_intr_disable(vdev->devh);
1440
1441 switch (vdev->cric_err_event) {
1442 case VXGE_HW_EVENT_UNKNOWN:
d03848e0 1443 netif_tx_stop_all_queues(vdev->ndev);
1444 vxge_debug_init(VXGE_ERR,
 1445 "fatal: %s: Disabling device due to "
1446 "unknown error",
1447 vdev->ndev->name);
1448 ret = -EPERM;
1449 goto out;
1450 case VXGE_HW_EVENT_RESET_START:
1451 break;
1452 case VXGE_HW_EVENT_RESET_COMPLETE:
1453 case VXGE_HW_EVENT_LINK_DOWN:
1454 case VXGE_HW_EVENT_LINK_UP:
1455 case VXGE_HW_EVENT_ALARM_CLEARED:
1456 case VXGE_HW_EVENT_ECCERR:
1457 case VXGE_HW_EVENT_MRPCIM_ECCERR:
1458 ret = -EPERM;
1459 goto out;
1460 case VXGE_HW_EVENT_FIFO_ERR:
1461 case VXGE_HW_EVENT_VPATH_ERR:
1462 break;
1463 case VXGE_HW_EVENT_CRITICAL_ERR:
d03848e0 1464 netif_tx_stop_all_queues(vdev->ndev);
1465 vxge_debug_init(VXGE_ERR,
 1466 "fatal: %s: Disabling device due to "
1467 "serious error",
1468 vdev->ndev->name);
1469 /* SOP or device reset required */
1470 /* This event is not currently used */
1471 ret = -EPERM;
1472 goto out;
1473 case VXGE_HW_EVENT_SERR:
d03848e0 1474 netif_tx_stop_all_queues(vdev->ndev);
1475 vxge_debug_init(VXGE_ERR,
 1476 "fatal: %s: Disabling device due to "
1477 "serious error",
1478 vdev->ndev->name);
1479 ret = -EPERM;
1480 goto out;
1481 case VXGE_HW_EVENT_SRPCIM_SERR:
1482 case VXGE_HW_EVENT_MRPCIM_SERR:
1483 ret = -EPERM;
1484 goto out;
1485 case VXGE_HW_EVENT_SLOT_FREEZE:
d03848e0 1486 netif_tx_stop_all_queues(vdev->ndev);
1487 vxge_debug_init(VXGE_ERR,
 1488 "fatal: %s: Disabling device due to "
1489 "slot freeze",
1490 vdev->ndev->name);
1491 ret = -EPERM;
1492 goto out;
1493 default:
1494 break;
1495
1496 }
1497 }
1498
1499 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
d03848e0 1500 netif_tx_stop_all_queues(vdev->ndev);
1501
1502 if (event == VXGE_LL_FULL_RESET) {
1503 status = vxge_reset_all_vpaths(vdev);
1504 if (status != VXGE_HW_OK) {
1505 vxge_debug_init(VXGE_ERR,
1506 "fatal: %s: can not reset vpaths",
1507 vdev->ndev->name);
1508 ret = -EPERM;
1509 goto out;
1510 }
1511 }
1512
1513 if (event == VXGE_LL_COMPL_RESET) {
1514 for (i = 0; i < vdev->no_of_vpath; i++)
1515 if (vdev->vpaths[i].handle) {
1516 if (vxge_hw_vpath_recover_from_reset(
1517 vdev->vpaths[i].handle)
1518 != VXGE_HW_OK) {
1519 vxge_debug_init(VXGE_ERR,
1520 "vxge_hw_vpath_recover_"
1521 "from_reset failed for vpath: "
1522 "%d", i);
1523 ret = -EPERM;
1524 goto out;
1525 }
1526 } else {
1527 vxge_debug_init(VXGE_ERR,
1528 "vxge_hw_vpath_reset failed for "
1529 "vpath:%d", i);
1530 ret = -EPERM;
1531 goto out;
1532 }
1533 }
1534
1535 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1536 /* Reprogram the DA table with populated mac addresses */
1537 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1538 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1539 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1540 }
1541
1542 /* enable vpath interrupts */
1543 for (i = 0; i < vdev->no_of_vpath; i++)
1544 vxge_vpath_intr_enable(vdev, i);
1545
1546 vxge_hw_device_intr_enable(vdev->devh);
1547
1548 smp_wmb();
1549
1550 /* Indicate card up */
1551 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1552
1553 /* Get the traffic to flow through the vpaths */
1554 for (i = 0; i < vdev->no_of_vpath; i++) {
1555 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1556 smp_wmb();
1557 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1558 }
1559
d03848e0 1560 netif_tx_wake_all_queues(vdev->ndev);
1561 }
1562
1563out:
1564 vxge_debug_entryexit(VXGE_TRACE,
1565 "%s:%d Exiting...", __func__, __LINE__);
1566
1567 /* Indicate reset done */
1568 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1569 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1570 return ret;
1571}
1572
1573/*
1574 * vxge_reset
1575 * @vdev: pointer to ll device
1576 *
 1577 * The driver may reset the chip on events such as serr, eccerr, etc.
1578 */
42821a5b 1579static int vxge_reset(struct vxgedev *vdev)
703da5a1 1580{
7adf7d1b 1581 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1582}
1583
1584/**
1585 * vxge_poll - Receive handler when Receive Polling is used.
1586 * @dev: pointer to the device structure.
1587 * @budget: Number of packets budgeted to be processed in this iteration.
1588 *
 1589 * This function comes into the picture only if the receive side is handled
 1590 * through polling (called NAPI in Linux). It mostly does what the normal
1591 * Rx interrupt handler does in terms of descriptor and packet processing
1592 * but not in an interrupt context. Also it will process a specified number
1593 * of packets at most in one iteration. This value is passed down by the
1594 * kernel as the function argument 'budget'.
1595 */
1596static int vxge_poll_msix(struct napi_struct *napi, int budget)
1597{
1598 struct vxge_ring *ring =
1599 container_of(napi, struct vxge_ring, napi);
1600 int budget_org = budget;
1601 ring->budget = budget;
1602
1603 vxge_hw_vpath_poll_rx(ring->handle);
1604
1605 if (ring->pkts_processed < budget_org) {
1606 napi_complete(napi);
1607 /* Re enable the Rx interrupts for the vpath */
1608 vxge_hw_channel_msix_unmask(
1609 (struct __vxge_hw_channel *)ring->handle,
1610 ring->rx_vector_no);
1611 }
1612
1613 return ring->pkts_processed;
1614}
1615
1616static int vxge_poll_inta(struct napi_struct *napi, int budget)
1617{
1618 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1619 int pkts_processed = 0;
1620 int i;
1621 int budget_org = budget;
1622 struct vxge_ring *ring;
1623
1624 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
1625 pci_get_drvdata(vdev->pdev);
1626
1627 for (i = 0; i < vdev->no_of_vpath; i++) {
1628 ring = &vdev->vpaths[i].ring;
1629 ring->budget = budget;
1630 vxge_hw_vpath_poll_rx(ring->handle);
1631 pkts_processed += ring->pkts_processed;
1632 budget -= ring->pkts_processed;
1633 if (budget <= 0)
1634 break;
1635 }
1636
1637 VXGE_COMPLETE_ALL_TX(vdev);
1638
1639 if (pkts_processed < budget_org) {
1640 napi_complete(napi);
1641 /* Re enable the Rx interrupts for the ring */
1642 vxge_hw_device_unmask_all(hldev);
1643 vxge_hw_device_flush_io(hldev);
1644 }
1645
1646 return pkts_processed;
1647}
1648
1649#ifdef CONFIG_NET_POLL_CONTROLLER
1650/**
1651 * vxge_netpoll - netpoll event handler entry point
1652 * @dev : pointer to the device structure.
1653 * Description:
1654 * This function will be called by upper layer to check for events on the
1655 * interface in situations where interrupts are disabled. It is used for
1656 * specific in-kernel networking tasks, such as remote consoles and kernel
 1657 * debugging over the network (for example, netdump in Red Hat).
1658 */
1659static void vxge_netpoll(struct net_device *dev)
1660{
1661 struct __vxge_hw_device *hldev;
1662 struct vxgedev *vdev;
1663
1664 vdev = (struct vxgedev *)netdev_priv(dev);
1665 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
1666
1667 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1668
1669 if (pci_channel_offline(vdev->pdev))
1670 return;
1671
1672 disable_irq(dev->irq);
 1673 vxge_hw_device_clear_tx_rx(hldev);
 1674
1676 VXGE_COMPLETE_ALL_RX(vdev);
1677 VXGE_COMPLETE_ALL_TX(vdev);
1678
1679 enable_irq(dev->irq);
1680
1681 vxge_debug_entryexit(VXGE_TRACE,
1682 "%s:%d Exiting...", __func__, __LINE__);
1683}
1684#endif
1685
1686/* RTH configuration */
1687static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1688{
1689 enum vxge_hw_status status = VXGE_HW_OK;
1690 struct vxge_hw_rth_hash_types hash_types;
1691 u8 itable[256] = {0}; /* indirection table */
1692 u8 mtable[256] = {0}; /* CPU to vpath mapping */
1693 int index;
1694
1695 /*
1696 * Filling
1697 * - itable with bucket numbers
1698 * - mtable with bucket-to-vpath mapping
1699 */
1700 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1701 itable[index] = index;
1702 mtable[index] = index % vdev->no_of_vpath;
1703 }
1704
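/*
 * Editorial illustration (hypothetical numbers): with rth_bkt_sz == 2 and
 * three vpaths, the loop above yields itable = {0, 1, 2, 3} and
 * mtable = {0, 1, 2, 0}, i.e. hash buckets are spread across the vpaths
 * round-robin.
 */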
1705 /* set indirection table, bucket-to-vpath mapping */
1706 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1707 vdev->no_of_vpath,
1708 mtable, itable,
1709 vdev->config.rth_bkt_sz);
1710 if (status != VXGE_HW_OK) {
1711 vxge_debug_init(VXGE_ERR,
1712 "RTH indirection table configuration failed "
1713 "for vpath:%d", vdev->vpaths[0].device_id);
1714 return status;
1715 }
1716
1717 /* Fill RTH hash types */
1718 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1719 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1720 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1721 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1722 hash_types.hash_type_tcpipv6ex_en =
1723 vdev->config.rth_hash_type_tcpipv6ex;
1724 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1725
703da5a1 1726 /*
1727 * Because the itable_set() method uses the active_table field
1728 * for the target virtual path the RTH config should be updated
1729 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1730 * when steering frames.
1731 */
1732 for (index = 0; index < vdev->no_of_vpath; index++) {
1733 status = vxge_hw_vpath_rts_rth_set(
1734 vdev->vpaths[index].handle,
1735 vdev->config.rth_algorithm,
1736 &hash_types,
1737 vdev->config.rth_bkt_sz);
1738
1739 if (status != VXGE_HW_OK) {
1740 vxge_debug_init(VXGE_ERR,
1741 "RTH configuration failed for vpath:%d",
1742 vdev->vpaths[index].device_id);
1743 return status;
1744 }
1745 }
1746
1747 return status;
1748}
1749
42821a5b 1750static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
1751{
1752 struct vxge_mac_addrs *new_mac_entry;
1753 u8 *mac_address = NULL;
1754
1755 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
1756 return TRUE;
1757
1758 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
1759 if (!new_mac_entry) {
1760 vxge_debug_mem(VXGE_ERR,
1761 "%s: memory allocation failed",
1762 VXGE_DRIVER_NAME);
1763 return FALSE;
1764 }
1765
1766 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
1767
1768 /* Copy the new mac address to the list */
1769 mac_address = (u8 *)&new_mac_entry->macaddr;
1770 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1771
1772 new_mac_entry->state = mac->state;
1773 vpath->mac_addr_cnt++;
1774
1775 /* Is this a multicast address */
1776 if (0x01 & mac->macaddr[0])
1777 vpath->mcast_addr_cnt++;
1778
1779 return TRUE;
1780}
1781
1782/* Add a mac address to DA table */
42821a5b 1783static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
1784 struct macInfo *mac)
1785{
1786 enum vxge_hw_status status = VXGE_HW_OK;
1787 struct vxge_vpath *vpath;
1788 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
1789
1790 if (0x01 & mac->macaddr[0]) /* multicast address */
1791 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
1792 else
1793 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
1794
1795 vpath = &vdev->vpaths[mac->vpath_no];
1796 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
1797 mac->macmask, duplicate_mode);
1798 if (status != VXGE_HW_OK) {
1799 vxge_debug_init(VXGE_ERR,
1800 "DA config add entry failed for vpath:%d",
1801 vpath->device_id);
1802 } else
1803 if (FALSE == vxge_mac_list_add(vpath, mac))
1804 status = -EPERM;
1805
1806 return status;
1807}
1808
42821a5b 1809static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1810{
1811 struct list_head *entry, *next;
1812 u64 del_mac = 0;
1813 u8 *mac_address = (u8 *) (&del_mac);
1814
1815 /* Copy the mac address to delete from the list */
1816 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1817
1818 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1819 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1820 list_del(entry);
1821 kfree((struct vxge_mac_addrs *)entry);
1822 vpath->mac_addr_cnt--;
1823
1824 /* Is this a multicast address */
1825 if (0x01 & mac->macaddr[0])
1826 vpath->mcast_addr_cnt--;
1827 return TRUE;
1828 }
1829 }
1830
1831 return FALSE;
1832}
1833 /* Delete a mac address from DA table */
42821a5b 1834static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
1835 struct macInfo *mac)
703da5a1
RV
1836{
1837 enum vxge_hw_status status = VXGE_HW_OK;
1838 struct vxge_vpath *vpath;
1839
1840 vpath = &vdev->vpaths[mac->vpath_no];
1841 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1842 mac->macmask);
1843 if (status != VXGE_HW_OK) {
1844 vxge_debug_init(VXGE_ERR,
1845 "DA config delete entry failed for vpath:%d",
1846 vpath->device_id);
1847 } else
1848 vxge_mac_list_del(vpath, mac);
1849 return status;
1850}
1851
1852 /* Search for a given mac address in the DA table */
1853 static enum vxge_hw_status
1854 vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
1855 struct macInfo *mac)
1856{
1857 enum vxge_hw_status status = VXGE_HW_OK;
1858 unsigned char macmask[ETH_ALEN];
1859 unsigned char macaddr[ETH_ALEN];
1860
1861 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1862 macaddr, macmask);
1863 if (status != VXGE_HW_OK) {
1864 vxge_debug_init(VXGE_ERR,
1865 "DA config list entry failed for vpath:%d",
1866 vpath->device_id);
1867 return status;
1868 }
1869
1870 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1871
1872 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1873 macaddr, macmask);
1874 if (status != VXGE_HW_OK)
1875 break;
1876 }
1877
1878 return status;
1879}
1880
1881/* Store all vlan ids from the list to the vid table */
42821a5b 1882static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
703da5a1
RV
1883{
1884 enum vxge_hw_status status = VXGE_HW_OK;
1885 struct vxgedev *vdev = vpath->vdev;
1886 u16 vid;
1887
1888 if (vdev->vlgrp && vpath->is_open) {
1889
b738127d 1890 for (vid = 0; vid < VLAN_N_VID; vid++) {
703da5a1
RV
1891 if (!vlan_group_get_device(vdev->vlgrp, vid))
1892 continue;
1893			/* Add these vlans to the vid table */
1894 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1895 }
1896 }
1897
1898 return status;
1899}
1900
1901/* Store all mac addresses from the list to the DA table */
42821a5b 1902static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
703da5a1
RV
1903{
1904 enum vxge_hw_status status = VXGE_HW_OK;
1905 struct macInfo mac_info;
1906 u8 *mac_address = NULL;
1907 struct list_head *entry, *next;
1908
1909 memset(&mac_info, 0, sizeof(struct macInfo));
1910
1911 if (vpath->is_open) {
1912
1913 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1914 mac_address =
1915 (u8 *)&
1916 ((struct vxge_mac_addrs *)entry)->macaddr;
1917 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1918 ((struct vxge_mac_addrs *)entry)->state =
1919 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1920 /* does this mac address already exist in da table? */
1921 status = vxge_search_mac_addr_in_da_table(vpath,
1922 &mac_info);
1923 if (status != VXGE_HW_OK) {
1924 /* Add this mac address to the DA table */
1925 status = vxge_hw_vpath_mac_addr_add(
1926 vpath->handle, mac_info.macaddr,
1927 mac_info.macmask,
1928 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1929 if (status != VXGE_HW_OK) {
1930 vxge_debug_init(VXGE_ERR,
1931 "DA add entry failed for vpath:%d",
1932 vpath->device_id);
1933 ((struct vxge_mac_addrs *)entry)->state
1934 = VXGE_LL_MAC_ADDR_IN_LIST;
1935 }
1936 }
1937 }
1938 }
1939
1940 return status;
1941}
1942
1943/* reset vpaths */
4d2a5b40 1944enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
703da5a1 1945{
703da5a1 1946 enum vxge_hw_status status = VXGE_HW_OK;
7adf7d1b
JM
1947 struct vxge_vpath *vpath;
1948 int i;
703da5a1 1949
7adf7d1b
JM
1950 for (i = 0; i < vdev->no_of_vpath; i++) {
1951 vpath = &vdev->vpaths[i];
1952 if (vpath->handle) {
1953 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
703da5a1
RV
1954 if (is_vxge_card_up(vdev) &&
1955 vxge_hw_vpath_recover_from_reset(
7adf7d1b 1956 vpath->handle) != VXGE_HW_OK) {
703da5a1
RV
1957 vxge_debug_init(VXGE_ERR,
1958 "vxge_hw_vpath_recover_"
1959 "from_reset failed for vpath: "
1960 "%d", i);
1961 return status;
1962 }
1963 } else {
1964 vxge_debug_init(VXGE_ERR,
1965 "vxge_hw_vpath_reset failed for "
1966 "vpath:%d", i);
1967 return status;
1968 }
1969 }
7adf7d1b
JM
1970 }
1971
703da5a1
RV
1972 return status;
1973}
1974
1975/* close vpaths */
42821a5b 1976static void vxge_close_vpaths(struct vxgedev *vdev, int index)
703da5a1 1977{
7adf7d1b 1978 struct vxge_vpath *vpath;
703da5a1 1979 int i;
7adf7d1b 1980
703da5a1 1981 for (i = index; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
1982 vpath = &vdev->vpaths[i];
1983
1984 if (vpath->handle && vpath->is_open) {
1985 vxge_hw_vpath_close(vpath->handle);
703da5a1
RV
1986 vdev->stats.vpaths_open--;
1987 }
7adf7d1b
JM
1988 vpath->is_open = 0;
1989 vpath->handle = NULL;
703da5a1
RV
1990 }
1991}
1992
1993/* open vpaths */
42821a5b 1994static int vxge_open_vpaths(struct vxgedev *vdev)
703da5a1 1995{
7adf7d1b 1996 struct vxge_hw_vpath_attr attr;
703da5a1 1997 enum vxge_hw_status status;
7adf7d1b 1998 struct vxge_vpath *vpath;
703da5a1 1999 u32 vp_id = 0;
7adf7d1b 2000 int i;
703da5a1
RV
2001
2002 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2003 vpath = &vdev->vpaths[i];
2004
2005 vxge_assert(vpath->is_configured);
2006 attr.vp_id = vpath->device_id;
703da5a1
RV
2007 attr.fifo_attr.callback = vxge_xmit_compl;
2008 attr.fifo_attr.txdl_term = vxge_tx_term;
2009 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
7adf7d1b 2010 attr.fifo_attr.userdata = &vpath->fifo;
703da5a1
RV
2011
2012 attr.ring_attr.callback = vxge_rx_1b_compl;
2013 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2014 attr.ring_attr.rxd_term = vxge_rx_term;
2015 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
7adf7d1b 2016 attr.ring_attr.userdata = &vpath->ring;
703da5a1 2017
7adf7d1b
JM
2018 vpath->ring.ndev = vdev->ndev;
2019 vpath->ring.pdev = vdev->pdev;
2020 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
703da5a1 2021 if (status == VXGE_HW_OK) {
7adf7d1b 2022 vpath->fifo.handle =
703da5a1 2023 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
7adf7d1b 2024 vpath->ring.handle =
703da5a1 2025 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
7adf7d1b 2026 vpath->fifo.tx_steering_type =
703da5a1 2027 vdev->config.tx_steering_type;
7adf7d1b
JM
2028 vpath->fifo.ndev = vdev->ndev;
2029 vpath->fifo.pdev = vdev->pdev;
98f45da2
JM
2030 if (vdev->config.tx_steering_type)
2031 vpath->fifo.txq =
2032 netdev_get_tx_queue(vdev->ndev, i);
2033 else
2034 vpath->fifo.txq =
2035 netdev_get_tx_queue(vdev->ndev, 0);
7adf7d1b 2036 vpath->fifo.indicate_max_pkts =
703da5a1 2037 vdev->config.fifo_indicate_max_pkts;
7adf7d1b
JM
2038 vpath->ring.rx_vector_no = 0;
2039 vpath->ring.rx_csum = vdev->rx_csum;
2040 vpath->is_open = 1;
2041 vdev->vp_handles[i] = vpath->handle;
2042 vpath->ring.gro_enable = vdev->config.gro_enable;
2043 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
703da5a1
RV
2044 vdev->stats.vpaths_open++;
2045 } else {
2046 vdev->stats.vpath_open_fail++;
2047 vxge_debug_init(VXGE_ERR,
2048 "%s: vpath: %d failed to open "
2049 "with status: %d",
7adf7d1b 2050 vdev->ndev->name, vpath->device_id,
703da5a1
RV
2051 status);
2052 vxge_close_vpaths(vdev, 0);
2053 return -EPERM;
2054 }
2055
7adf7d1b 2056 vp_id = vpath->handle->vpath->vp_id;
703da5a1
RV
2057 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2058 }
2059 return VXGE_HW_OK;
2060}
2061
2062/*
2063 * vxge_isr_napi
2064 * @irq: the irq of the device.
2065 * @dev_id: a void pointer to the vxgedev structure of the Titan device
2067 *
2068 * This function is the ISR handler of the device when napi is enabled. It
2069 * identifies the reason for the interrupt and calls the relevant service
2070 * routines.
2071 */
2072static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2073{
703da5a1 2074 struct net_device *dev;
a5d165b5 2075 struct __vxge_hw_device *hldev;
703da5a1
RV
2076 u64 reason;
2077 enum vxge_hw_status status;
a5d165b5 2078	struct vxgedev *vdev = (struct vxgedev *)dev_id;
703da5a1
RV
2079
2080 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2081
a5d165b5
SH
2082 dev = vdev->ndev;
2083 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
703da5a1
RV
2084
2085 if (pci_channel_offline(vdev->pdev))
2086 return IRQ_NONE;
2087
2088 if (unlikely(!is_vxge_card_up(vdev)))
4d2a5b40 2089 return IRQ_HANDLED;
703da5a1
RV
2090
2091 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
2092 &reason);
2093 if (status == VXGE_HW_OK) {
2094 vxge_hw_device_mask_all(hldev);
2095
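		/*
		 * vpaths_deployed appears to be kept in vxge_mBIT() (MSB-first)
		 * form, so it is shifted down here to line up with the per-vpath
		 * bits of the traffic interrupt status field (assumption based
		 * on the vxge_mBIT() convention used elsewhere in this driver).
		 */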
2096 if (reason &
2097 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2098 vdev->vpaths_deployed >>
2099 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2100
2101 vxge_hw_device_clear_tx_rx(hldev);
2102 napi_schedule(&vdev->napi);
2103 vxge_debug_intr(VXGE_TRACE,
2104 "%s:%d Exiting...", __func__, __LINE__);
2105 return IRQ_HANDLED;
2106 } else
2107 vxge_hw_device_unmask_all(hldev);
2108 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2109 (status == VXGE_HW_ERR_CRITICAL) ||
2110 (status == VXGE_HW_ERR_FIFO))) {
2111 vxge_hw_device_mask_all(hldev);
2112 vxge_hw_device_flush_io(hldev);
2113 return IRQ_HANDLED;
2114 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2115 return IRQ_HANDLED;
2116
2117 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2118 return IRQ_NONE;
2119}
2120
2121#ifdef CONFIG_PCI_MSI
2122
2123static irqreturn_t
2124vxge_tx_msix_handle(int irq, void *dev_id)
2125{
2126 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2127
2128 VXGE_COMPLETE_VPATH_TX(fifo);
2129
2130 return IRQ_HANDLED;
2131}
2132
2133static irqreturn_t
2134vxge_rx_msix_napi_handle(int irq, void *dev_id)
2135{
2136 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2137
2138 /* MSIX_IDX for Rx is 1 */
2139 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2140 ring->rx_vector_no);
2141
2142 napi_schedule(&ring->napi);
2143 return IRQ_HANDLED;
2144}
2145
2146static irqreturn_t
2147vxge_alarm_msix_handle(int irq, void *dev_id)
2148{
2149 int i;
2150 enum vxge_hw_status status;
2151 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2152 struct vxgedev *vdev = vpath->vdev;
b59c9457
SH
2153 int msix_id = (vpath->handle->vpath->vp_id *
2154 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
703da5a1
RV
2155
2156 for (i = 0; i < vdev->no_of_vpath; i++) {
b59c9457 2157 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
703da5a1
RV
2158
2159 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2160 vdev->exec_mode);
2161 if (status == VXGE_HW_OK) {
2162
2163 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
b59c9457 2164 msix_id);
703da5a1
RV
2165 continue;
2166 }
2167 vxge_debug_intr(VXGE_ERR,
2168 "%s: vxge_hw_vpath_alarm_process failed %x ",
2169 VXGE_DRIVER_NAME, status);
2170 }
2171 return IRQ_HANDLED;
2172}
2173
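/*
 * Allocate the MSI-X entry tables and enable MSI-X.  If the PCI core grants
 * fewer vectors than requested, reduce the number of vpaths to fit and retry;
 * on a hard failure return an error so the caller can fall back to INTA.
 */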
2174static int vxge_alloc_msix(struct vxgedev *vdev)
2175{
2176 int j, i, ret = 0;
b59c9457 2177 int msix_intr_vect = 0, temp;
703da5a1
RV
2178 vdev->intr_cnt = 0;
2179
b59c9457 2180start:
703da5a1
RV
2181 /* Tx/Rx MSIX Vectors count */
2182 vdev->intr_cnt = vdev->no_of_vpath * 2;
2183
2184 /* Alarm MSIX Vectors count */
2185 vdev->intr_cnt++;
2186
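	/*
	 * The entry table holds two vectors per vpath (Tx fifo first, then
	 * Rx ring) plus one shared alarm vector; the hardware MSI-X entry
	 * for vpath i's fifo is i * VXGE_HW_VPATH_MSIX_ACTIVE, its ring uses
	 * the next entry, and the alarm sits at VXGE_ALARM_MSIX_ID.
	 */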
baeb2ffa
JP
2187 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2188 GFP_KERNEL);
703da5a1
RV
2189 if (!vdev->entries) {
2190 vxge_debug_init(VXGE_ERR,
2191 "%s: memory allocation failed",
2192 VXGE_DRIVER_NAME);
cc413d90
MS
2193 ret = -ENOMEM;
2194 goto alloc_entries_failed;
703da5a1
RV
2195 }
2196
baeb2ffa
JP
2197 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2198 sizeof(struct vxge_msix_entry),
2199 GFP_KERNEL);
703da5a1
RV
2200 if (!vdev->vxge_entries) {
2201 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2202 VXGE_DRIVER_NAME);
cc413d90
MS
2203 ret = -ENOMEM;
2204 goto alloc_vxge_entries_failed;
703da5a1
RV
2205 }
2206
b59c9457 2207 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
703da5a1
RV
2208
2209 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2210
2211 /* Initialize the fifo vector */
2212 vdev->entries[j].entry = msix_intr_vect;
2213 vdev->vxge_entries[j].entry = msix_intr_vect;
2214 vdev->vxge_entries[j].in_use = 0;
2215 j++;
2216
2217 /* Initialize the ring vector */
2218 vdev->entries[j].entry = msix_intr_vect + 1;
2219 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2220 vdev->vxge_entries[j].in_use = 0;
2221 j++;
2222 }
2223
2224 /* Initialize the alarm vector */
b59c9457
SH
2225 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2226 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
703da5a1
RV
2227 vdev->vxge_entries[j].in_use = 0;
2228
b59c9457 2229 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
b59c9457 2230 if (ret > 0) {
703da5a1
RV
2231 vxge_debug_init(VXGE_ERR,
2232 "%s: MSI-X enable failed for %d vectors, ret: %d",
b59c9457 2233 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
cc413d90
MS
2234 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
2235 ret = -ENODEV;
2236 goto enable_msix_failed;
2237 }
2238
703da5a1
RV
2239 kfree(vdev->entries);
2240 kfree(vdev->vxge_entries);
2241 vdev->entries = NULL;
2242 vdev->vxge_entries = NULL;
b59c9457
SH
2243		/* Retry with fewer vectors by reducing the vpath count */
2244 temp = (ret - 1)/2;
2245 vxge_close_vpaths(vdev, temp);
2246 vdev->no_of_vpath = temp;
2247 goto start;
cc413d90
MS
2248 } else if (ret < 0) {
2249 ret = -ENODEV;
2250 goto enable_msix_failed;
2251 }
703da5a1 2252 return 0;
cc413d90
MS
2253
2254enable_msix_failed:
2255 kfree(vdev->vxge_entries);
2256alloc_vxge_entries_failed:
2257 kfree(vdev->entries);
2258alloc_entries_failed:
2259 return ret;
703da5a1
RV
2260}
2261
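/*
 * Enable MSI-X and program each vpath's interrupt block with its Tx/Rx
 * vector mapping (tim_msix_id below) and the shared alarm vector,
 * VXGE_ALARM_MSIX_ID.  Each ring also records its Rx vector number here.
 */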
2262static int vxge_enable_msix(struct vxgedev *vdev)
2263{
2264
2265 int i, ret = 0;
703da5a1 2266 /* 0 - Tx, 1 - Rx */
b59c9457
SH
2267 int tim_msix_id[4] = {0, 1, 0, 0};
2268
703da5a1
RV
2269 vdev->intr_cnt = 0;
2270
2271 /* allocate msix vectors */
2272 ret = vxge_alloc_msix(vdev);
2273 if (!ret) {
703da5a1 2274 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b 2275 struct vxge_vpath *vpath = &vdev->vpaths[i];
703da5a1 2276
7adf7d1b
JM
2277			/* If the fifo or ring is not enabled, the MSI-X vector for
2278			 * it should be set to 0.
2279 */
2280 vpath->ring.rx_vector_no = (vpath->device_id *
2281 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
703da5a1 2282
7adf7d1b
JM
2283 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2284 VXGE_ALARM_MSIX_ID);
703da5a1
RV
2285 }
2286 }
2287
2288 return ret;
2289}
2290
2291static void vxge_rem_msix_isr(struct vxgedev *vdev)
2292{
2293 int intr_cnt;
2294
b59c9457 2295 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
703da5a1
RV
2296 intr_cnt++) {
2297 if (vdev->vxge_entries[intr_cnt].in_use) {
2298 synchronize_irq(vdev->entries[intr_cnt].vector);
2299 free_irq(vdev->entries[intr_cnt].vector,
2300 vdev->vxge_entries[intr_cnt].arg);
2301 vdev->vxge_entries[intr_cnt].in_use = 0;
2302 }
2303 }
2304
2305 kfree(vdev->entries);
2306 kfree(vdev->vxge_entries);
2307 vdev->entries = NULL;
2308 vdev->vxge_entries = NULL;
2309
2310 if (vdev->config.intr_type == MSI_X)
2311 pci_disable_msix(vdev->pdev);
2312}
2313#endif
2314
2315static void vxge_rem_isr(struct vxgedev *vdev)
2316{
2317 struct __vxge_hw_device *hldev;
2318 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2319
2320#ifdef CONFIG_PCI_MSI
2321 if (vdev->config.intr_type == MSI_X) {
2322 vxge_rem_msix_isr(vdev);
2323 } else
2324#endif
2325 if (vdev->config.intr_type == INTA) {
2326 synchronize_irq(vdev->pdev->irq);
a5d165b5 2327 free_irq(vdev->pdev->irq, vdev);
703da5a1
RV
2328 }
2329}
2330
2331static int vxge_add_isr(struct vxgedev *vdev)
2332{
2333 int ret = 0;
703da5a1
RV
2334#ifdef CONFIG_PCI_MSI
2335 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
703da5a1
RV
2336 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2337
2338 if (vdev->config.intr_type == MSI_X)
2339 ret = vxge_enable_msix(vdev);
2340
2341 if (ret) {
2342 vxge_debug_init(VXGE_ERR,
2343 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
eb5f10c2
SH
2344 vxge_debug_init(VXGE_ERR,
2345 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2346 vdev->config.intr_type = INTA;
703da5a1
RV
2347 }
2348
2349 if (vdev->config.intr_type == MSI_X) {
2350 for (intr_idx = 0;
2351 intr_idx < (vdev->no_of_vpath *
2352 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2353
2354 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2355 irq_req = 0;
2356
2357 switch (msix_idx) {
2358 case 0:
2359 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
b59c9457
SH
2360 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2361 vdev->ndev->name,
2362 vdev->entries[intr_cnt].entry,
2363 pci_fun, vp_idx);
703da5a1
RV
2364 ret = request_irq(
2365 vdev->entries[intr_cnt].vector,
2366 vxge_tx_msix_handle, 0,
2367 vdev->desc[intr_cnt],
2368 &vdev->vpaths[vp_idx].fifo);
2369 vdev->vxge_entries[intr_cnt].arg =
2370 &vdev->vpaths[vp_idx].fifo;
2371 irq_req = 1;
2372 break;
2373 case 1:
2374 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
b59c9457
SH
2375 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2376 vdev->ndev->name,
2377 vdev->entries[intr_cnt].entry,
2378 pci_fun, vp_idx);
703da5a1
RV
2379 ret = request_irq(
2380 vdev->entries[intr_cnt].vector,
2381 vxge_rx_msix_napi_handle,
2382 0,
2383 vdev->desc[intr_cnt],
2384 &vdev->vpaths[vp_idx].ring);
2385 vdev->vxge_entries[intr_cnt].arg =
2386 &vdev->vpaths[vp_idx].ring;
2387 irq_req = 1;
2388 break;
2389 }
2390
2391 if (ret) {
2392 vxge_debug_init(VXGE_ERR,
2393 "%s: MSIX - %d Registration failed",
2394 vdev->ndev->name, intr_cnt);
2395 vxge_rem_msix_isr(vdev);
eb5f10c2
SH
2396 vdev->config.intr_type = INTA;
2397 vxge_debug_init(VXGE_ERR,
2398 "%s: Defaulting to INTA"
2399 , vdev->ndev->name);
703da5a1 2400 goto INTA_MODE;
703da5a1
RV
2401 }
2402
2403 if (irq_req) {
2404			/* We requested this MSI-X interrupt */
2405 vdev->vxge_entries[intr_cnt].in_use = 1;
b59c9457
SH
2406 msix_idx += vdev->vpaths[vp_idx].device_id *
2407 VXGE_HW_VPATH_MSIX_ACTIVE;
703da5a1
RV
2408 vxge_hw_vpath_msix_unmask(
2409 vdev->vpaths[vp_idx].handle,
b59c9457 2410 msix_idx);
703da5a1
RV
2411 intr_cnt++;
2412 }
2413
2414 /* Point to next vpath handler */
8e95a202
JP
2415 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2416 (vp_idx < (vdev->no_of_vpath - 1)))
2417 vp_idx++;
703da5a1
RV
2418 }
2419
b59c9457 2420 intr_cnt = vdev->no_of_vpath * 2;
703da5a1 2421 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
b59c9457
SH
2422 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2423 vdev->ndev->name,
2424 vdev->entries[intr_cnt].entry,
2425 pci_fun);
703da5a1
RV
2426 /* For Alarm interrupts */
2427 ret = request_irq(vdev->entries[intr_cnt].vector,
2428 vxge_alarm_msix_handle, 0,
2429 vdev->desc[intr_cnt],
b59c9457 2430 &vdev->vpaths[0]);
703da5a1
RV
2431 if (ret) {
2432 vxge_debug_init(VXGE_ERR,
2433 "%s: MSIX - %d Registration failed",
2434 vdev->ndev->name, intr_cnt);
2435 vxge_rem_msix_isr(vdev);
eb5f10c2
SH
2436 vdev->config.intr_type = INTA;
2437 vxge_debug_init(VXGE_ERR,
2438 "%s: Defaulting to INTA",
2439 vdev->ndev->name);
703da5a1 2440 goto INTA_MODE;
703da5a1
RV
2441 }
2442
b59c9457
SH
2443 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2444 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
703da5a1 2445 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
b59c9457 2446 msix_idx);
703da5a1 2447 vdev->vxge_entries[intr_cnt].in_use = 1;
b59c9457 2448 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
703da5a1
RV
2449 }
2450INTA_MODE:
2451#endif
703da5a1
RV
2452
2453 if (vdev->config.intr_type == INTA) {
b59c9457
SH
2454 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2455 "%s:vxge:INTA", vdev->ndev->name);
eb5f10c2
SH
2456 vxge_hw_device_set_intr_type(vdev->devh,
2457 VXGE_HW_INTR_MODE_IRQLINE);
2458 vxge_hw_vpath_tti_ci_set(vdev->devh,
2459 vdev->vpaths[0].device_id);
703da5a1
RV
2460 ret = request_irq((int) vdev->pdev->irq,
2461 vxge_isr_napi,
a5d165b5 2462 IRQF_SHARED, vdev->desc[0], vdev);
703da5a1
RV
2463 if (ret) {
2464 vxge_debug_init(VXGE_ERR,
2465 "%s %s-%d: ISR registration failed",
2466 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2467 return -ENODEV;
2468 }
2469 vxge_debug_init(VXGE_TRACE,
2470 "new %s-%d line allocated",
2471 "IRQ", vdev->pdev->irq);
2472 }
2473
2474 return VXGE_HW_OK;
2475}
2476
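/*
 * Timer callback: reset any vpaths flagged in vdev->vp_reset, re-enable
 * interrupts if anything was reset in non-MSI-X mode, and re-arm the timer
 * for another HZ/2.
 */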
2477static void vxge_poll_vp_reset(unsigned long data)
2478{
2479 struct vxgedev *vdev = (struct vxgedev *)data;
2480 int i, j = 0;
2481
2482 for (i = 0; i < vdev->no_of_vpath; i++) {
2483 if (test_bit(i, &vdev->vp_reset)) {
2484 vxge_reset_vpath(vdev, i);
2485 j++;
2486 }
2487 }
2488 if (j && (vdev->config.intr_type != MSI_X)) {
2489 vxge_hw_device_unmask_all(vdev->devh);
2490 vxge_hw_device_flush_io(vdev->devh);
2491 }
2492
2493 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2494}
2495
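/*
 * Lockup detector: if a ring has received no frames since the last poll and
 * the leak check fails on two consecutive passes, disable that vpath's
 * interrupts, stop its Tx queue and flag it for reset by vxge_poll_vp_reset.
 */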
2496static void vxge_poll_vp_lockup(unsigned long data)
2497{
2498 struct vxgedev *vdev = (struct vxgedev *)data;
703da5a1 2499 enum vxge_hw_status status = VXGE_HW_OK;
7adf7d1b
JM
2500 struct vxge_vpath *vpath;
2501 struct vxge_ring *ring;
2502 int i;
703da5a1
RV
2503
2504 for (i = 0; i < vdev->no_of_vpath; i++) {
2505 ring = &vdev->vpaths[i].ring;
2506		/* Did this vpath receive any packets? */
2507 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
2508 status = vxge_hw_vpath_check_leak(ring->handle);
2509
2510			/* Did it receive any packets last time? */
2511 if ((VXGE_HW_FAIL == status) &&
2512 (VXGE_HW_FAIL == ring->last_status)) {
2513
2514 /* schedule vpath reset */
2515 if (!test_and_set_bit(i, &vdev->vp_reset)) {
7adf7d1b 2516 vpath = &vdev->vpaths[i];
703da5a1
RV
2517
2518 /* disable interrupts for this vpath */
2519 vxge_vpath_intr_disable(vdev, i);
2520
2521 /* stop the queue for this vpath */
98f45da2 2522 netif_tx_stop_queue(vpath->fifo.txq);
703da5a1
RV
2523 continue;
2524 }
2525 }
2526 }
2527 ring->stats.prev_rx_frms = ring->stats.rx_frms;
2528 ring->last_status = status;
2529 }
2530
2531	/* Check every millisecond */
2532 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2533}
2534
2535/**
2536 * vxge_open
2537 * @dev: pointer to the device structure.
2538 *
2539 * This function is the open entry point of the driver. It mainly calls a
2540 * function to allocate Rx buffers and inserts them into the buffer
2541 * descriptors and then enables the Rx part of the NIC.
2542 * Return value: '0' on success and an appropriate negative errno
2543 * value on failure.
2544 */
42821a5b 2545static int
703da5a1
RV
2546vxge_open(struct net_device *dev)
2547{
2548 enum vxge_hw_status status;
2549 struct vxgedev *vdev;
2550 struct __vxge_hw_device *hldev;
7adf7d1b 2551 struct vxge_vpath *vpath;
703da5a1
RV
2552 int ret = 0;
2553 int i;
2554 u64 val64, function_mode;
2555 vxge_debug_entryexit(VXGE_TRACE,
2556 "%s: %s:%d", dev->name, __func__, __LINE__);
2557
2558 vdev = (struct vxgedev *)netdev_priv(dev);
2559 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2560 function_mode = vdev->config.device_hw_info.function_mode;
2561
2562	/* Make sure the link is off by default every time the NIC is
2563	 * initialized */
2564 netif_carrier_off(dev);
2565
703da5a1
RV
2566 /* Open VPATHs */
2567 status = vxge_open_vpaths(vdev);
2568 if (status != VXGE_HW_OK) {
2569 vxge_debug_init(VXGE_ERR,
2570 "%s: fatal: Vpath open failed", vdev->ndev->name);
2571 ret = -EPERM;
2572 goto out0;
2573 }
2574
2575 vdev->mtu = dev->mtu;
2576
2577 status = vxge_add_isr(vdev);
2578 if (status != VXGE_HW_OK) {
2579 vxge_debug_init(VXGE_ERR,
2580 "%s: fatal: ISR add failed", dev->name);
2581 ret = -EPERM;
2582 goto out1;
2583 }
2584
703da5a1
RV
2585 if (vdev->config.intr_type != MSI_X) {
2586 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2587 vdev->config.napi_weight);
2588 napi_enable(&vdev->napi);
7adf7d1b
JM
2589 for (i = 0; i < vdev->no_of_vpath; i++) {
2590 vpath = &vdev->vpaths[i];
2591 vpath->ring.napi_p = &vdev->napi;
2592 }
703da5a1
RV
2593 } else {
2594 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2595 vpath = &vdev->vpaths[i];
2596 netif_napi_add(dev, &vpath->ring.napi,
703da5a1 2597 vxge_poll_msix, vdev->config.napi_weight);
7adf7d1b
JM
2598 napi_enable(&vpath->ring.napi);
2599 vpath->ring.napi_p = &vpath->ring.napi;
703da5a1
RV
2600 }
2601 }
2602
2603 /* configure RTH */
2604 if (vdev->config.rth_steering) {
2605 status = vxge_rth_configure(vdev);
2606 if (status != VXGE_HW_OK) {
2607 vxge_debug_init(VXGE_ERR,
2608 "%s: fatal: RTH configuration failed",
2609 dev->name);
2610 ret = -EPERM;
2611 goto out2;
2612 }
2613 }
47f01db4
JM
2614 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2615 hldev->config.rth_en ? "enabled" : "disabled");
703da5a1
RV
2616
2617 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2618 vpath = &vdev->vpaths[i];
2619
703da5a1 2620 /* set initial mtu before enabling the device */
7adf7d1b 2621 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
703da5a1
RV
2622 if (status != VXGE_HW_OK) {
2623 vxge_debug_init(VXGE_ERR,
2624 "%s: fatal: can not set new MTU", dev->name);
2625 ret = -EPERM;
2626 goto out2;
2627 }
2628 }
2629
2630 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2631 vxge_debug_init(vdev->level_trace,
2632 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2633 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2634
7adf7d1b
JM
2635 /* Restore the DA, VID table and also multicast and promiscuous mode
2636 * states
2637 */
2638 if (vdev->all_multi_flg) {
2639 for (i = 0; i < vdev->no_of_vpath; i++) {
2640 vpath = &vdev->vpaths[i];
2641 vxge_restore_vpath_mac_addr(vpath);
2642 vxge_restore_vpath_vid_table(vpath);
2643
2644 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2645 if (status != VXGE_HW_OK)
2646 vxge_debug_init(VXGE_ERR,
2647 "%s:%d Enabling multicast failed",
2648 __func__, __LINE__);
2649 }
703da5a1
RV
2650 }
2651
2652	/* Enable vpaths to sniff all unicast/multicast traffic that is not
2653	 * addressed to them. We allow promiscuous mode for the PF only
2654 */
2655
2656 val64 = 0;
2657 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2658 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2659
2660 vxge_hw_mgmt_reg_write(vdev->devh,
2661 vxge_hw_mgmt_reg_type_mrpcim,
2662 0,
2663 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2664 rxmac_authorize_all_addr),
2665 val64);
2666
2667 vxge_hw_mgmt_reg_write(vdev->devh,
2668 vxge_hw_mgmt_reg_type_mrpcim,
2669 0,
2670 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2671 rxmac_authorize_all_vid),
2672 val64);
2673
2674 vxge_set_multicast(dev);
2675
2676	/* Enable broadcast and multicast for all vpaths */
2677 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2678 vpath = &vdev->vpaths[i];
2679 status = vxge_hw_vpath_bcast_enable(vpath->handle);
703da5a1
RV
2680 if (status != VXGE_HW_OK)
2681 vxge_debug_init(VXGE_ERR,
2682 "%s : Can not enable bcast for vpath "
2683 "id %d", dev->name, i);
2684 if (vdev->config.addr_learn_en) {
7adf7d1b 2685 status = vxge_hw_vpath_mcast_enable(vpath->handle);
703da5a1
RV
2686 if (status != VXGE_HW_OK)
2687 vxge_debug_init(VXGE_ERR,
2688 "%s : Can not enable mcast for vpath "
2689 "id %d", dev->name, i);
2690 }
2691 }
2692
2693 vxge_hw_device_setpause_data(vdev->devh, 0,
2694 vdev->config.tx_pause_enable,
2695 vdev->config.rx_pause_enable);
2696
2697 if (vdev->vp_reset_timer.function == NULL)
2698 vxge_os_timer(vdev->vp_reset_timer,
2699 vxge_poll_vp_reset, vdev, (HZ/2));
2700
2701 if (vdev->vp_lockup_timer.function == NULL)
2702 vxge_os_timer(vdev->vp_lockup_timer,
2703 vxge_poll_vp_lockup, vdev, (HZ/2));
2704
2705 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2706
2707 smp_wmb();
2708
2709 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2710 netif_carrier_on(vdev->ndev);
75f5e1c6 2711 netdev_notice(vdev->ndev, "Link Up\n");
703da5a1
RV
2712 vdev->stats.link_up++;
2713 }
2714
2715 vxge_hw_device_intr_enable(vdev->devh);
2716
2717 smp_wmb();
2718
2719 for (i = 0; i < vdev->no_of_vpath; i++) {
7adf7d1b
JM
2720 vpath = &vdev->vpaths[i];
2721
2722 vxge_hw_vpath_enable(vpath->handle);
703da5a1 2723 smp_wmb();
7adf7d1b 2724 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
703da5a1
RV
2725 }
2726
d03848e0 2727 netif_tx_start_all_queues(vdev->ndev);
703da5a1
RV
2728 goto out0;
2729
2730out2:
2731 vxge_rem_isr(vdev);
2732
2733 /* Disable napi */
2734 if (vdev->config.intr_type != MSI_X)
2735 napi_disable(&vdev->napi);
2736 else {
2737 for (i = 0; i < vdev->no_of_vpath; i++)
2738 napi_disable(&vdev->vpaths[i].ring.napi);
2739 }
2740
2741out1:
2742 vxge_close_vpaths(vdev, 0);
2743out0:
2744 vxge_debug_entryexit(VXGE_TRACE,
2745 "%s: %s:%d Exiting...",
2746 dev->name, __func__, __LINE__);
2747 return ret;
2748}
2749
2750 /* Loop through the mac address list and delete all the entries */
42821a5b 2751static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
703da5a1
RV
2752{
2753
2754 struct list_head *entry, *next;
2755 if (list_empty(&vpath->mac_addr_list))
2756 return;
2757
2758 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2759 list_del(entry);
2760 kfree((struct vxge_mac_addrs *)entry);
2761 }
2762}
2763
2764static void vxge_napi_del_all(struct vxgedev *vdev)
2765{
2766 int i;
2767 if (vdev->config.intr_type != MSI_X)
2768 netif_napi_del(&vdev->napi);
2769 else {
2770 for (i = 0; i < vdev->no_of_vpath; i++)
2771 netif_napi_del(&vdev->vpaths[i].ring.napi);
2772 }
703da5a1
RV
2773}
2774
42821a5b 2775static int do_vxge_close(struct net_device *dev, int do_io)
703da5a1
RV
2776{
2777 enum vxge_hw_status status;
2778 struct vxgedev *vdev;
2779 struct __vxge_hw_device *hldev;
2780 int i;
2781 u64 val64, vpath_vector;
2782 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2783 dev->name, __func__, __LINE__);
2784
2785 vdev = (struct vxgedev *)netdev_priv(dev);
2786 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2787
bd9ee680
SH
2788 if (unlikely(!is_vxge_card_up(vdev)))
2789 return 0;
2790
703da5a1
RV
2791 /* If vxge_handle_crit_err task is executing,
2792 * wait till it completes. */
2793 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2794 msleep(50);
2795
703da5a1
RV
2796 if (do_io) {
2797 /* Put the vpath back in normal mode */
2798 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2799 status = vxge_hw_mgmt_reg_read(vdev->devh,
2800 vxge_hw_mgmt_reg_type_mrpcim,
2801 0,
2802 (ulong)offsetof(
2803 struct vxge_hw_mrpcim_reg,
2804 rts_mgr_cbasin_cfg),
2805 &val64);
2806
2807 if (status == VXGE_HW_OK) {
2808 val64 &= ~vpath_vector;
2809 status = vxge_hw_mgmt_reg_write(vdev->devh,
2810 vxge_hw_mgmt_reg_type_mrpcim,
2811 0,
2812 (ulong)offsetof(
2813 struct vxge_hw_mrpcim_reg,
2814 rts_mgr_cbasin_cfg),
2815 val64);
2816 }
2817
2818		/* Remove function 0 from promiscuous mode */
2819 vxge_hw_mgmt_reg_write(vdev->devh,
2820 vxge_hw_mgmt_reg_type_mrpcim,
2821 0,
2822 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2823 rxmac_authorize_all_addr),
2824 0);
2825
2826 vxge_hw_mgmt_reg_write(vdev->devh,
2827 vxge_hw_mgmt_reg_type_mrpcim,
2828 0,
2829 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2830 rxmac_authorize_all_vid),
2831 0);
2832
2833 smp_wmb();
2834 }
2835 del_timer_sync(&vdev->vp_lockup_timer);
2836
2837 del_timer_sync(&vdev->vp_reset_timer);
2838
4d2a5b40
JM
2839 if (do_io)
2840 vxge_hw_device_wait_receive_idle(hldev);
2841
2842 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2843
703da5a1
RV
2844 /* Disable napi */
2845 if (vdev->config.intr_type != MSI_X)
2846 napi_disable(&vdev->napi);
2847 else {
2848 for (i = 0; i < vdev->no_of_vpath; i++)
2849 napi_disable(&vdev->vpaths[i].ring.napi);
2850 }
2851
2852 netif_carrier_off(vdev->ndev);
75f5e1c6 2853 netdev_notice(vdev->ndev, "Link Down\n");
d03848e0 2854 netif_tx_stop_all_queues(vdev->ndev);
703da5a1
RV
2855
2856 /* Note that at this point xmit() is stopped by upper layer */
2857 if (do_io)
2858 vxge_hw_device_intr_disable(vdev->devh);
2859
703da5a1
RV
2860 vxge_rem_isr(vdev);
2861
2862 vxge_napi_del_all(vdev);
2863
2864 if (do_io)
2865 vxge_reset_all_vpaths(vdev);
2866
2867 vxge_close_vpaths(vdev, 0);
2868
2869 vxge_debug_entryexit(VXGE_TRACE,
2870 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2871
703da5a1
RV
2872 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2873
2874 return 0;
2875}
2876
2877/**
2878 * vxge_close
2879 * @dev: device pointer.
2880 *
2881 * This is the stop entry point of the driver. It needs to undo exactly
2882 * whatever was done by the open entry point, thus it's usually referred to
2883 * as the close function. Among other things this function mainly stops the
2884 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2885 * Return value: '0' on success and an appropriate negative errno
2886 * value on failure.
2887 */
42821a5b 2888static int
703da5a1
RV
2889vxge_close(struct net_device *dev)
2890{
2891 do_vxge_close(dev, 1);
2892 return 0;
2893}
2894
2895/**
2896 * vxge_change_mtu
2897 * @dev: net device pointer.
2898 * @new_mtu :the new MTU size for the device.
2899 *
2900 * A driver entry point to change MTU size for the device. Before changing
2901 * the MTU the device must be stopped.
2902 */
2903static int vxge_change_mtu(struct net_device *dev, int new_mtu)
2904{
2905 struct vxgedev *vdev = netdev_priv(dev);
2906
2907 vxge_debug_entryexit(vdev->level_trace,
2908 "%s:%d", __func__, __LINE__);
2909 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
2910 vxge_debug_init(vdev->level_err,
2911 "%s: mtu size is invalid", dev->name);
2912 return -EPERM;
2913 }
2914
2915 /* check if device is down already */
2916 if (unlikely(!is_vxge_card_up(vdev))) {
2917 /* just store new value, will use later on open() */
2918 dev->mtu = new_mtu;
2919 vxge_debug_init(vdev->level_err,
2920 "%s", "device is down on MTU change");
2921 return 0;
2922 }
2923
2924 vxge_debug_init(vdev->level_trace,
2925 "trying to apply new MTU %d", new_mtu);
2926
2927 if (vxge_close(dev))
2928 return -EIO;
2929
2930 dev->mtu = new_mtu;
2931 vdev->mtu = new_mtu;
2932
2933 if (vxge_open(dev))
2934 return -EIO;
2935
2936 vxge_debug_init(vdev->level_trace,
2937 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
2938
2939 vxge_debug_entryexit(vdev->level_trace,
2940 "%s:%d Exiting...", __func__, __LINE__);
2941
2942 return 0;
2943}
2944
2945/**
dd57f970 2946 * vxge_get_stats64
703da5a1 2947 * @dev: pointer to the device structure
dd57f970 2948 * @stats: pointer to struct rtnl_link_stats64
703da5a1 2949 *
703da5a1 2950 */
dd57f970
ED
2951static struct rtnl_link_stats64 *
2952vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
703da5a1 2953{
dd57f970 2954 struct vxgedev *vdev = netdev_priv(dev);
703da5a1
RV
2955 int k;
2956
dd57f970 2957 /* net_stats already zeroed by caller */
703da5a1
RV
2958 for (k = 0; k < vdev->no_of_vpath; k++) {
2959 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
2960 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
2961 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
2962 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
2963 net_stats->rx_dropped +=
2964 vdev->vpaths[k].ring.stats.rx_dropped;
2965
2966 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
2967 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
2968 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
2969 }
2970
2971 return net_stats;
2972}
2973
2974/**
2975 * vxge_ioctl
2976 * @dev: Device pointer.
2977 * @ifr: An IOCTL specific structure, that can contain a pointer to
2978 * a proprietary structure used to pass information to the driver.
2979 * @cmd: This is used to distinguish between the different commands that
2980 * can be passed to the IOCTL functions.
2981 *
2982 * Entry point for the Ioctl.
2983 */
2984static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2985{
2986 return -EOPNOTSUPP;
2987}
2988
2989/**
2990 * vxge_tx_watchdog
2991 * @dev: pointer to net device structure
2992 *
2993 * Watchdog for transmit side.
2994 * This function is triggered if the Tx Queue is stopped
2995 * for a pre-defined amount of time when the Interface is still up.
2996 */
2997static void
2998vxge_tx_watchdog(struct net_device *dev)
2999{
3000 struct vxgedev *vdev;
3001
3002 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3003
3004 vdev = (struct vxgedev *)netdev_priv(dev);
3005
3006 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3007
3008 vxge_reset(vdev);
3009 vxge_debug_entryexit(VXGE_TRACE,
3010 "%s:%d Exiting...", __func__, __LINE__);
3011}
3012
3013/**
3014 * vxge_vlan_rx_register
3015 * @dev: net device pointer.
3016 * @grp: vlan group
3017 *
3018 * Vlan group registration
3019 */
3020static void
3021vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3022{
3023 struct vxgedev *vdev;
3024 struct vxge_vpath *vpath;
3025 int vp;
3026 u64 vid;
3027 enum vxge_hw_status status;
3028 int i;
3029
3030 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3031
3032 vdev = (struct vxgedev *)netdev_priv(dev);
3033
3034 vpath = &vdev->vpaths[0];
3035 if ((NULL == grp) && (vpath->is_open)) {
3036 /* Get the first vlan */
3037 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3038
3039 while (status == VXGE_HW_OK) {
3040
3041 /* Delete this vlan from the vid table */
3042 for (vp = 0; vp < vdev->no_of_vpath; vp++) {
3043 vpath = &vdev->vpaths[vp];
3044 if (!vpath->is_open)
3045 continue;
3046
3047 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3048 }
3049
3050 /* Get the next vlan to be deleted */
3051 vpath = &vdev->vpaths[0];
3052 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3053 }
3054 }
3055
3056 vdev->vlgrp = grp;
3057
3058 for (i = 0; i < vdev->no_of_vpath; i++) {
3059 if (vdev->vpaths[i].is_configured)
3060 vdev->vpaths[i].ring.vlgrp = grp;
3061 }
3062
3063 vxge_debug_entryexit(VXGE_TRACE,
3064 "%s:%d Exiting...", __func__, __LINE__);
3065}
3066
3067/**
3068 * vxge_vlan_rx_add_vid
3069 * @dev: net device pointer.
3070 * @vid: vid
3071 *
3072 * Add the vlan id to the device's vlan id table
3073 */
3074static void
3075vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3076{
3077 struct vxgedev *vdev;
3078 struct vxge_vpath *vpath;
3079 int vp_id;
3080
3081 vdev = (struct vxgedev *)netdev_priv(dev);
3082
3083	/* Add this vlan to the vid table */
3084 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3085 vpath = &vdev->vpaths[vp_id];
3086 if (!vpath->is_open)
3087 continue;
3088 vxge_hw_vpath_vid_add(vpath->handle, vid);
3089 }
3090}
3091
3092/**
3093 * vxge_vlan_rx_kill_vid
3094 * @dev: net device pointer.
3095 * @vid: vid
3096 *
3097 * Remove the vlan id from the device's vlan id table
3098 */
3099static void
3100vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3101{
3102 struct vxgedev *vdev;
3103 struct vxge_vpath *vpath;
3104 int vp_id;
3105
3106 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3107
3108 vdev = (struct vxgedev *)netdev_priv(dev);
3109
3110 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3111
3112 /* Delete this vlan from the vid table */
3113 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3114 vpath = &vdev->vpaths[vp_id];
3115 if (!vpath->is_open)
3116 continue;
3117 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3118 }
3119 vxge_debug_entryexit(VXGE_TRACE,
3120 "%s:%d Exiting...", __func__, __LINE__);
3121}
3122
3123static const struct net_device_ops vxge_netdev_ops = {
3124 .ndo_open = vxge_open,
3125 .ndo_stop = vxge_close,
dd57f970 3126 .ndo_get_stats64 = vxge_get_stats64,
703da5a1
RV
3127 .ndo_start_xmit = vxge_xmit,
3128 .ndo_validate_addr = eth_validate_addr,
3129 .ndo_set_multicast_list = vxge_set_multicast,
3130
3131 .ndo_do_ioctl = vxge_ioctl,
3132
3133 .ndo_set_mac_address = vxge_set_mac_addr,
3134 .ndo_change_mtu = vxge_change_mtu,
3135 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3136 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3137 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3138
3139 .ndo_tx_timeout = vxge_tx_watchdog,
3140#ifdef CONFIG_NET_POLL_CONTROLLER
3141 .ndo_poll_controller = vxge_netpoll,
3142#endif
3143};
3144
42821a5b 3145static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3146 struct vxge_config *config,
3147 int high_dma, int no_of_vpath,
3148 struct vxgedev **vdev_out)
703da5a1
RV
3149{
3150 struct net_device *ndev;
3151 enum vxge_hw_status status = VXGE_HW_OK;
3152 struct vxgedev *vdev;
98f45da2 3153 int ret = 0, no_of_queue = 1;
703da5a1
RV
3154 u64 stat;
3155
3156 *vdev_out = NULL;
d03848e0 3157 if (config->tx_steering_type)
703da5a1
RV
3158 no_of_queue = no_of_vpath;
3159
3160 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3161 no_of_queue);
3162 if (ndev == NULL) {
3163 vxge_debug_init(
3164 vxge_hw_device_trace_level_get(hldev),
3165 "%s : device allocation failed", __func__);
3166 ret = -ENODEV;
3167 goto _out0;
3168 }
3169
3170 vxge_debug_entryexit(
3171 vxge_hw_device_trace_level_get(hldev),
3172 "%s: %s:%d Entering...",
3173 ndev->name, __func__, __LINE__);
3174
3175 vdev = netdev_priv(ndev);
3176 memset(vdev, 0, sizeof(struct vxgedev));
3177
3178 vdev->ndev = ndev;
3179 vdev->devh = hldev;
3180 vdev->pdev = hldev->pdev;
3181 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3182 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3183
3184 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3185
3186 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
3187 NETIF_F_HW_VLAN_FILTER;
3188 /* Driver entry points */
3189 ndev->irq = vdev->pdev->irq;
3190 ndev->base_addr = (unsigned long) hldev->bar0;
3191
3192 ndev->netdev_ops = &vxge_netdev_ops;
3193
3194 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3195
42821a5b 3196 vxge_initialize_ethtool_ops(ndev);
703da5a1 3197
47f01db4
JM
3198 if (vdev->config.rth_steering != NO_STEERING) {
3199 ndev->features |= NETIF_F_RXHASH;
3200 hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
3201 }
3202
703da5a1
RV
3203 /* Allocate memory for vpath */
3204 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3205 no_of_vpath, GFP_KERNEL);
3206 if (!vdev->vpaths) {
3207 vxge_debug_init(VXGE_ERR,
3208 "%s: vpath memory allocation failed",
3209 vdev->ndev->name);
3210 ret = -ENODEV;
3211 goto _out1;
3212 }
3213
3214 ndev->features |= NETIF_F_SG;
3215
3216 ndev->features |= NETIF_F_HW_CSUM;
3217 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3218		"%s : checksumming enabled", __func__);
3219
3220 if (high_dma) {
3221 ndev->features |= NETIF_F_HIGHDMA;
3222 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3223 "%s : using High DMA", __func__);
3224 }
3225
3226 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
3227
3228 if (vdev->config.gro_enable)
3229 ndev->features |= NETIF_F_GRO;
3230
703da5a1
RV
3231 if (register_netdev(ndev)) {
3232 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3233 "%s: %s : device registration failed!",
3234 ndev->name, __func__);
3235 ret = -ENODEV;
3236 goto _out2;
3237 }
3238
3239 /* Set the factory defined MAC address initially */
3240 ndev->addr_len = ETH_ALEN;
3241
3242	/* Set the link state to off at this point; when the link change
3243	 * interrupt arrives, the state will automatically be changed to
3244	 * the right state.
3245 */
3246 netif_carrier_off(ndev);
3247
3248 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3249 "%s: Ethernet device registered",
3250 ndev->name);
3251
e8ac1756 3252 hldev->ndev = ndev;
703da5a1
RV
3253 *vdev_out = vdev;
3254
3255 /* Resetting the Device stats */
3256 status = vxge_hw_mrpcim_stats_access(
3257 hldev,
3258 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3259 0,
3260 0,
3261 &stat);
3262
3263 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3264 vxge_debug_init(
3265 vxge_hw_device_trace_level_get(hldev),
3266			"%s: device stats clear returns "
3267 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3268
3269 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3270 "%s: %s:%d Exiting...",
3271 ndev->name, __func__, __LINE__);
3272
3273 return ret;
3274_out2:
3275 kfree(vdev->vpaths);
3276_out1:
3277 free_netdev(ndev);
3278_out0:
3279 return ret;
3280}
3281
3282/*
3283 * vxge_device_unregister
3284 *
3285 * This function will unregister and free network device
3286 */
42821a5b 3287static void
703da5a1
RV
3288vxge_device_unregister(struct __vxge_hw_device *hldev)
3289{
3290 struct vxgedev *vdev;
3291 struct net_device *dev;
3292 char buf[IFNAMSIZ];
3293#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3294 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3295 u32 level_trace;
3296#endif
3297
3298 dev = hldev->ndev;
3299 vdev = netdev_priv(dev);
3300#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3301 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3302 level_trace = vdev->level_trace;
3303#endif
3304 vxge_debug_entryexit(level_trace,
3305 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3306
3307 memcpy(buf, vdev->ndev->name, IFNAMSIZ);
3308
3309	/* in 2.6, unregister_netdev() will call stop() if the device is up */
3310 unregister_netdev(dev);
3311
3312 flush_scheduled_work();
3313
3314 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
3315 vxge_debug_entryexit(level_trace,
3316 "%s: %s:%d Exiting...", buf, __func__, __LINE__);
3317}
3318
3319/*
3320 * vxge_callback_crit_err
3321 *
3322 * This function is called by the alarm handler in interrupt context.
3323 * Driver must analyze it based on the event type.
3324 */
3325static void
3326vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3327 enum vxge_hw_event type, u64 vp_id)
3328{
3329 struct net_device *dev = hldev->ndev;
3330 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
98f45da2 3331 struct vxge_vpath *vpath = NULL;
703da5a1
RV
3332 int vpath_idx;
3333
3334 vxge_debug_entryexit(vdev->level_trace,
3335 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3336
3337 /* Note: This event type should be used for device wide
3338 * indications only - Serious errors, Slot freeze and critical errors
3339 */
3340 vdev->cric_err_event = type;
3341
98f45da2
JM
3342 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3343 vpath = &vdev->vpaths[vpath_idx];
3344 if (vpath->device_id == vp_id)
703da5a1 3345 break;
98f45da2 3346 }
703da5a1
RV
3347
3348 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3349 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3350 vxge_debug_init(VXGE_ERR,
3351 "%s: Slot is frozen", vdev->ndev->name);
3352 } else if (type == VXGE_HW_EVENT_SERR) {
3353 vxge_debug_init(VXGE_ERR,
3354 "%s: Encountered Serious Error",
3355 vdev->ndev->name);
3356 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3357 vxge_debug_init(VXGE_ERR,
3358 "%s: Encountered Critical Error",
3359 vdev->ndev->name);
3360 }
3361
3362 if ((type == VXGE_HW_EVENT_SERR) ||
3363 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3364 if (unlikely(vdev->exec_mode))
3365 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3366 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3367 vxge_hw_device_mask_all(hldev);
3368 if (unlikely(vdev->exec_mode))
3369 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3370 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3371 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3372
3373 if (unlikely(vdev->exec_mode))
3374 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3375 else {
3376 /* check if this vpath is already set for reset */
3377 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3378
3379 /* disable interrupts for this vpath */
3380 vxge_vpath_intr_disable(vdev, vpath_idx);
3381
3382 /* stop the queue for this vpath */
98f45da2 3383 netif_tx_stop_queue(vpath->fifo.txq);
703da5a1
RV
3384 }
3385 }
3386 }
3387
3388 vxge_debug_entryexit(vdev->level_trace,
3389 "%s: %s:%d Exiting...",
3390 vdev->ndev->name, __func__, __LINE__);
3391}
3392
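/*
 * Sanity-check the bw_percentage module parameter: fall back to an equal
 * split across all vpaths if any entry is zero, if the sum exceeds the total
 * budget, or if the leftover cannot be split at a minimum of 2% per vpath.
 * For example (assuming the usual 100% budget), bw_percentage set to
 * 30,30,30 leaves 10% for the remaining 14 vpaths, which is below the 2%
 * minimum, so all vpaths would get an equal share instead.
 */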
3393static void verify_bandwidth(void)
3394{
3395 int i, band_width, total = 0, equal_priority = 0;
3396
3397 /* 1. If user enters 0 for some fifo, give equal priority to all */
3398 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3399 if (bw_percentage[i] == 0) {
3400 equal_priority = 1;
3401 break;
3402 }
3403 }
3404
3405 if (!equal_priority) {
3406 /* 2. If sum exceeds 100, give equal priority to all */
3407 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3408 if (bw_percentage[i] == 0xFF)
3409 break;
3410
3411 total += bw_percentage[i];
3412 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3413 equal_priority = 1;
3414 break;
3415 }
3416 }
3417 }
3418
3419 if (!equal_priority) {
3420 /* Is all the bandwidth consumed? */
3421 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3422 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3423 /* Split rest of bw equally among next VPs*/
3424 band_width =
3425 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3426 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3427 if (band_width < 2) /* min of 2% */
3428 equal_priority = 1;
3429 else {
3430 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3431 i++)
3432 bw_percentage[i] =
3433 band_width;
3434 }
3435 }
3436 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3437 equal_priority = 1;
3438 }
3439
3440 if (equal_priority) {
3441 vxge_debug_init(VXGE_ERR,
3442 "%s: Assigning equal bandwidth to all the vpaths",
3443 VXGE_DRIVER_NAME);
3444 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3445 VXGE_HW_MAX_VIRTUAL_PATHS;
3446 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3447 bw_percentage[i] = bw_percentage[0];
3448 }
703da5a1
RV
3449}
3450
3451/*
3452 * Vpath configuration
3453 */
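/*
 * When the per-device vpath count is left at VXGE_USE_DEFAULT (presumably
 * via the max_config_vpath module parameter), it is derived from the number
 * of online CPUs - roughly one vpath per two CPUs, at least one - and then
 * capped by the vpaths actually present in vpath_mask.
 */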
3454static int __devinit vxge_config_vpaths(
3455 struct vxge_hw_device_config *device_config,
3456 u64 vpath_mask, struct vxge_config *config_param)
3457{
3458 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3459 u32 txdl_size, txdl_per_memblock;
3460
3461 temp = driver_config->vpath_per_dev;
3462 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3463 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3464		/* No more CPUs. Return vpath count as zero. */
3465 if (driver_config->g_no_cpus == -1)
3466 return 0;
3467
3468 if (!driver_config->g_no_cpus)
3469 driver_config->g_no_cpus = num_online_cpus();
3470
3471 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3472 if (!driver_config->vpath_per_dev)
3473 driver_config->vpath_per_dev = 1;
3474
3475 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3476 if (!vxge_bVALn(vpath_mask, i, 1))
3477 continue;
3478 else
3479 default_no_vpath++;
3480 if (default_no_vpath < driver_config->vpath_per_dev)
3481 driver_config->vpath_per_dev = default_no_vpath;
3482
3483 driver_config->g_no_cpus = driver_config->g_no_cpus -
3484 (driver_config->vpath_per_dev * 2);
3485 if (driver_config->g_no_cpus <= 0)
3486 driver_config->g_no_cpus = -1;
3487 }
3488
3489 if (driver_config->vpath_per_dev == 1) {
3490 vxge_debug_ll_config(VXGE_TRACE,
3491 "%s: Disable tx and rx steering, "
3492 "as single vpath is configured", VXGE_DRIVER_NAME);
3493 config_param->rth_steering = NO_STEERING;
3494 config_param->tx_steering_type = NO_STEERING;
3495 device_config->rth_en = 0;
3496 }
3497
3498 /* configure bandwidth */
3499 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3500 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3501
3502 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3503 device_config->vp_config[i].vp_id = i;
3504 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3505 if (no_of_vpaths < driver_config->vpath_per_dev) {
3506 if (!vxge_bVALn(vpath_mask, i, 1)) {
3507 vxge_debug_ll_config(VXGE_TRACE,
3508 "%s: vpath: %d is not available",
3509 VXGE_DRIVER_NAME, i);
3510 continue;
3511 } else {
3512 vxge_debug_ll_config(VXGE_TRACE,
3513 "%s: vpath: %d available",
3514 VXGE_DRIVER_NAME, i);
3515 no_of_vpaths++;
3516 }
3517 } else {
3518 vxge_debug_ll_config(VXGE_TRACE,
3519 "%s: vpath: %d is not configured, "
3520 "max_config_vpath exceeded",
3521 VXGE_DRIVER_NAME, i);
3522 break;
3523 }
3524
3525 /* Configure Tx fifo's */
3526 device_config->vp_config[i].fifo.enable =
3527 VXGE_HW_FIFO_ENABLE;
3528 device_config->vp_config[i].fifo.max_frags =
5beefb4f 3529 MAX_SKB_FRAGS + 1;
703da5a1
RV
3530 device_config->vp_config[i].fifo.memblock_size =
3531 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3532
5beefb4f
SH
3533 txdl_size = device_config->vp_config[i].fifo.max_frags *
3534 sizeof(struct vxge_hw_fifo_txd);
703da5a1
RV
3535 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3536
3537 device_config->vp_config[i].fifo.fifo_blocks =
3538 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3539
3540 device_config->vp_config[i].fifo.intr =
3541 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3542
3543 /* Configure tti properties */
3544 device_config->vp_config[i].tti.intr_enable =
3545 VXGE_HW_TIM_INTR_ENABLE;
3546
3547 device_config->vp_config[i].tti.btimer_val =
3548 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3549
3550 device_config->vp_config[i].tti.timer_ac_en =
3551 VXGE_HW_TIM_TIMER_AC_ENABLE;
3552
3553 /* For msi-x with napi (each vector
3554 has a handler of its own) -
3555 Set CI to OFF for all vpaths */
3556 device_config->vp_config[i].tti.timer_ci_en =
3557 VXGE_HW_TIM_TIMER_CI_DISABLE;
3558
3559 device_config->vp_config[i].tti.timer_ri_en =
3560 VXGE_HW_TIM_TIMER_RI_DISABLE;
3561
3562 device_config->vp_config[i].tti.util_sel =
3563 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3564
3565 device_config->vp_config[i].tti.ltimer_val =
3566 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3567
3568 device_config->vp_config[i].tti.rtimer_val =
3569 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3570
3571 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3572 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3573 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3574 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3575 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3576 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3577 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3578
3579 /* Configure Rx rings */
3580 device_config->vp_config[i].ring.enable =
3581 VXGE_HW_RING_ENABLE;
3582
3583 device_config->vp_config[i].ring.ring_blocks =
3584 VXGE_HW_DEF_RING_BLOCKS;
3585 device_config->vp_config[i].ring.buffer_mode =
3586 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3587 device_config->vp_config[i].ring.rxds_limit =
3588 VXGE_HW_DEF_RING_RXDS_LIMIT;
3589 device_config->vp_config[i].ring.scatter_mode =
3590 VXGE_HW_RING_SCATTER_MODE_A;
3591
3592 /* Configure rti properties */
3593 device_config->vp_config[i].rti.intr_enable =
3594 VXGE_HW_TIM_INTR_ENABLE;
3595
3596 device_config->vp_config[i].rti.btimer_val =
3597 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3598
3599 device_config->vp_config[i].rti.timer_ac_en =
3600 VXGE_HW_TIM_TIMER_AC_ENABLE;
3601
3602 device_config->vp_config[i].rti.timer_ci_en =
3603 VXGE_HW_TIM_TIMER_CI_DISABLE;
3604
3605 device_config->vp_config[i].rti.timer_ri_en =
3606 VXGE_HW_TIM_TIMER_RI_DISABLE;
3607
3608 device_config->vp_config[i].rti.util_sel =
3609 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3610
3611 device_config->vp_config[i].rti.urange_a =
3612 RTI_RX_URANGE_A;
3613 device_config->vp_config[i].rti.urange_b =
3614 RTI_RX_URANGE_B;
3615 device_config->vp_config[i].rti.urange_c =
3616 RTI_RX_URANGE_C;
3617 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3618 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3619 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3620 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3621
3622 device_config->vp_config[i].rti.rtimer_val =
3623 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3624
3625 device_config->vp_config[i].rti.ltimer_val =
3626 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3627
3628 device_config->vp_config[i].rpa_strip_vlan_tag =
3629 vlan_tag_strip;
3630 }
3631
3632 driver_config->vpath_per_dev = temp;
3633 return no_of_vpaths;
3634}
3635
3636 /* initialize device configurations */
3637static void __devinit vxge_device_config_init(
3638 struct vxge_hw_device_config *device_config,
3639 int *intr_type)
3640{
3641 /* Used for CQRQ/SRQ. */
3642 device_config->dma_blockpool_initial =
3643 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3644
3645 device_config->dma_blockpool_max =
3646 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3647
3648 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3649 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3650
3651#ifndef CONFIG_PCI_MSI
3652 vxge_debug_init(VXGE_ERR,
3653 "%s: This Kernel does not support "
3654 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3655 *intr_type = INTA;
3656#endif
3657
3658 /* Configure whether MSI-X or IRQL. */
3659 switch (*intr_type) {
3660 case INTA:
3661 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3662 break;
3663
3664 case MSI_X:
3665 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
3666 break;
3667 }
3668 /* Timer period between device poll */
3669 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3670
3671 /* Configure mac based steering. */
3672 device_config->rts_mac_en = addr_learn_en;
3673
3674 /* Configure Vpaths */
3675 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3676
3677 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3678 __func__);
3679 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3680 device_config->dma_blockpool_initial);
3681 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3682 device_config->dma_blockpool_max);
3683 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3684 device_config->intr_mode);
3685 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3686 device_config->device_poll_millis);
3687 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3688 device_config->rts_mac_en);
3689 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3690 device_config->rth_en);
3691 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3692 device_config->rth_it_type);
3693}
3694
3695static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3696{
3697 int i;
3698
3699 vxge_debug_init(VXGE_TRACE,
3700 "%s: %d Vpath(s) opened",
3701 vdev->ndev->name, vdev->no_of_vpath);
3702
3703 switch (vdev->config.intr_type) {
3704 case INTA:
3705 vxge_debug_init(VXGE_TRACE,
3706 "%s: Interrupt type INTA", vdev->ndev->name);
3707 break;
3708
3709 case MSI_X:
3710 vxge_debug_init(VXGE_TRACE,
3711 "%s: Interrupt type MSI-X", vdev->ndev->name);
3712 break;
3713 }
3714
3715 if (vdev->config.rth_steering) {
3716 vxge_debug_init(VXGE_TRACE,
3717 "%s: RTH steering enabled for TCP_IPV4",
3718 vdev->ndev->name);
3719 } else {
3720 vxge_debug_init(VXGE_TRACE,
3721 "%s: RTH steering disabled", vdev->ndev->name);
3722 }
3723
3724 switch (vdev->config.tx_steering_type) {
3725 case NO_STEERING:
3726 vxge_debug_init(VXGE_TRACE,
3727 "%s: Tx steering disabled", vdev->ndev->name);
3728 break;
3729 case TX_PRIORITY_STEERING:
3730 vxge_debug_init(VXGE_TRACE,
3731 "%s: Unsupported tx steering option",
3732 vdev->ndev->name);
3733 vxge_debug_init(VXGE_TRACE,
3734 "%s: Tx steering disabled", vdev->ndev->name);
3735 vdev->config.tx_steering_type = 0;
3736 break;
3737 case TX_VLAN_STEERING:
3738 vxge_debug_init(VXGE_TRACE,
3739 "%s: Unsupported tx steering option",
3740 vdev->ndev->name);
3741 vxge_debug_init(VXGE_TRACE,
3742 "%s: Tx steering disabled", vdev->ndev->name);
3743 vdev->config.tx_steering_type = 0;
3744 break;
3745 case TX_MULTIQ_STEERING:
3746 vxge_debug_init(VXGE_TRACE,
3747 "%s: Tx multiqueue steering enabled",
3748 vdev->ndev->name);
3749 break;
3750 case TX_PORT_STEERING:
3751 vxge_debug_init(VXGE_TRACE,
3752 "%s: Tx port steering enabled",
3753 vdev->ndev->name);
3754 break;
3755 default:
3756 vxge_debug_init(VXGE_ERR,
3757 "%s: Unsupported tx steering type",
3758 vdev->ndev->name);
3759 vxge_debug_init(VXGE_TRACE,
3760 "%s: Tx steering disabled", vdev->ndev->name);
3761 vdev->config.tx_steering_type = 0;
3762 }
3763
3764 if (vdev->config.gro_enable) {
3765 vxge_debug_init(VXGE_ERR,
3766 "%s: Generic receive offload enabled",
3767 vdev->ndev->name);
3768 } else
3769 vxge_debug_init(VXGE_TRACE,
3770 "%s: Generic receive offload disabled",
3771 vdev->ndev->name);
3772
3773 if (vdev->config.addr_learn_en)
3774 vxge_debug_init(VXGE_TRACE,
3775 "%s: MAC Address learning enabled", vdev->ndev->name);
3776
3777 vxge_debug_init(VXGE_TRACE,
3778 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3779
3780 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3781 if (!vxge_bVALn(vpath_mask, i, 1))
3782 continue;
3783 vxge_debug_ll_config(VXGE_TRACE,
3784 "%s: MTU size - %d", vdev->ndev->name,
3785 ((struct __vxge_hw_device *)(vdev->devh))->
3786 config.vp_config[i].mtu);
3787 vxge_debug_init(VXGE_TRACE,
3788 "%s: VLAN tag stripping %s", vdev->ndev->name,
3789 ((struct __vxge_hw_device *)(vdev->devh))->
3790 config.vp_config[i].rpa_strip_vlan_tag
3791 ? "Enabled" : "Disabled");
3792 vxge_debug_init(VXGE_TRACE,
3793 "%s: Ring blocks : %d", vdev->ndev->name,
3794 ((struct __vxge_hw_device *)(vdev->devh))->
3795 config.vp_config[i].ring.ring_blocks);
3796 vxge_debug_init(VXGE_TRACE,
3797 "%s: Fifo blocks : %d", vdev->ndev->name,
3798 ((struct __vxge_hw_device *)(vdev->devh))->
3799 config.vp_config[i].fifo.fifo_blocks);
3800 vxge_debug_ll_config(VXGE_TRACE,
3801 "%s: Max frags : %d", vdev->ndev->name,
3802 ((struct __vxge_hw_device *)(vdev->devh))->
3803 config.vp_config[i].fifo.max_frags);
3804 break;
3805 }
3806}
3807
3808#ifdef CONFIG_PM
3809/**
3810 * vxge_pm_suspend - vxge power management suspend entry point
 3811 * Power management is not implemented; always returns -ENOSYS.
3812 */
3813static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
3814{
3815 return -ENOSYS;
3816}
3817/**
3818 * vxge_pm_resume - vxge power management resume entry point
 3819 * Power management is not implemented; always returns -ENOSYS.
3820 */
3821static int vxge_pm_resume(struct pci_dev *pdev)
3822{
3823 return -ENOSYS;
3824}
3825
3826#endif
3827
3828/**
3829 * vxge_io_error_detected - called when PCI error is detected
3830 * @pdev: Pointer to PCI device
3831 * @state: The current pci connection state
3832 *
3833 * This function is called after a PCI bus error affecting
3834 * this device has been detected.
3835 */
3836static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3837 pci_channel_state_t state)
3838{
3839 struct __vxge_hw_device *hldev =
3840 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3841 struct net_device *netdev = hldev->ndev;
3842
3843 netif_device_detach(netdev);
3844
e33b992d
DN
3845 if (state == pci_channel_io_perm_failure)
3846 return PCI_ERS_RESULT_DISCONNECT;
3847
703da5a1
RV
3848 if (netif_running(netdev)) {
3849 /* Bring down the card, while avoiding PCI I/O */
3850 do_vxge_close(netdev, 0);
3851 }
3852
3853 pci_disable_device(pdev);
3854
3855 return PCI_ERS_RESULT_NEED_RESET;
3856}
3857
3858/**
3859 * vxge_io_slot_reset - called after the pci bus has been reset.
3860 * @pdev: Pointer to PCI device
3861 *
3862 * Restart the card from scratch, as if from a cold-boot.
 3863 * At this point, the card has experienced a hard reset,
3864 * followed by fixups by BIOS, and has its config space
3865 * set up identically to what it was at cold boot.
3866 */
3867static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3868{
3869 struct __vxge_hw_device *hldev =
3870 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3871 struct net_device *netdev = hldev->ndev;
3872
3873 struct vxgedev *vdev = netdev_priv(netdev);
3874
3875 if (pci_enable_device(pdev)) {
75f5e1c6 3876 netdev_err(netdev, "Cannot re-enable device after reset\n");
703da5a1
RV
3877 return PCI_ERS_RESULT_DISCONNECT;
3878 }
3879
3880 pci_set_master(pdev);
3881 vxge_reset(vdev);
3882
3883 return PCI_ERS_RESULT_RECOVERED;
3884}
3885
3886/**
3887 * vxge_io_resume - called when traffic can start flowing again.
3888 * @pdev: Pointer to PCI device
3889 *
3890 * This callback is called when the error recovery driver tells
 3891 * us that it's OK to resume normal operation.
3892 */
3893static void vxge_io_resume(struct pci_dev *pdev)
3894{
3895 struct __vxge_hw_device *hldev =
3896 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3897 struct net_device *netdev = hldev->ndev;
3898
3899 if (netif_running(netdev)) {
3900 if (vxge_open(netdev)) {
75f5e1c6
JP
3901 netdev_err(netdev,
3902 "Can't bring device back up after reset\n");
703da5a1
RV
3903 return;
3904 }
3905 }
3906
3907 netif_device_attach(netdev);
3908}
3909
cb27ec60
SH
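/* Translate the adapter function mode into the number of PCI functions it
 * exposes; vxge_probe() uses this to size the number of SR-IOV VFs to enable.
 */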
3910static inline u32 vxge_get_num_vfs(u64 function_mode)
3911{
3912 u32 num_functions = 0;
3913
3914 switch (function_mode) {
3915 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
3916 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
3917 num_functions = 8;
3918 break;
3919 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
3920 num_functions = 1;
3921 break;
3922 case VXGE_HW_FUNCTION_MODE_SRIOV:
3923 case VXGE_HW_FUNCTION_MODE_MRIOV:
3924 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
3925 num_functions = 17;
3926 break;
3927 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
3928 num_functions = 4;
3929 break;
3930 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
3931 num_functions = 2;
3932 break;
3933 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
3934 num_functions = 8; /* TODO */
3935 break;
3936 }
3937 return num_functions;
3938}
3939
e8ac1756
JM
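/* Fetch a firmware image via request_firmware() and flash it onto the
 * adapter. With @override clear, the flash is skipped (-EINVAL) when the
 * image version matches the firmware already running on the adapter.
 */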
3940int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
3941{
3942 struct __vxge_hw_device *hldev = vdev->devh;
3943 u32 maj, min, bld, cmaj, cmin, cbld;
3944 enum vxge_hw_status status;
3945 const struct firmware *fw;
3946 int ret;
3947
3948 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
3949 if (ret) {
3950 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
3951 VXGE_DRIVER_NAME, fw_name);
3952 goto out;
3953 }
3954
3955 /* Load the new firmware onto the adapter */
3956 status = vxge_update_fw_image(hldev, fw->data, fw->size);
3957 if (status != VXGE_HW_OK) {
3958 vxge_debug_init(VXGE_ERR,
3959 "%s: FW image download to adapter failed '%s'.",
3960 VXGE_DRIVER_NAME, fw_name);
3961 ret = -EIO;
3962 goto out;
3963 }
3964
3965 /* Read the version of the new firmware */
3966 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
3967 if (status != VXGE_HW_OK) {
3968 vxge_debug_init(VXGE_ERR,
3969 "%s: Upgrade read version failed '%s'.",
3970 VXGE_DRIVER_NAME, fw_name);
3971 ret = -EIO;
3972 goto out;
3973 }
3974
3975 cmaj = vdev->config.device_hw_info.fw_version.major;
3976 cmin = vdev->config.device_hw_info.fw_version.minor;
3977 cbld = vdev->config.device_hw_info.fw_version.build;
3978 /* It's possible the version in /lib/firmware is not the latest version.
3979 * If so, we could get into a loop of trying to upgrade to the latest
3980 * and flashing the older version.
3981 */
3982 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
3983 !override) {
3984 ret = -EINVAL;
3985 goto out;
3986 }
3987
3988 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
3989 maj, min, bld);
3990
3991 /* Flash the adapter with the new firmware */
3992 status = vxge_hw_flash_fw(hldev);
3993 if (status != VXGE_HW_OK) {
3994 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
3995 VXGE_DRIVER_NAME, fw_name);
3996 ret = -EIO;
3997 goto out;
3998 }
3999
4000 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4001 "hard reset before using, thus requiring a system reboot or a "
4002 "hotplug event.\n");
4003
4004out:
4005 return ret;
4006}
4007
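/* Called at probe time: decide whether the adapter firmware must be upgraded
 * and, if so, choose between the gPXE and plain images and flash the file.
 */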
4008static int vxge_probe_fw_update(struct vxgedev *vdev)
4009{
4010 u32 maj, min, bld;
4011 int ret, gpxe = 0;
4012 char *fw_name;
4013
4014 maj = vdev->config.device_hw_info.fw_version.major;
4015 min = vdev->config.device_hw_info.fw_version.minor;
4016 bld = vdev->config.device_hw_info.fw_version.build;
4017
4018 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4019 return 0;
4020
4021 /* Ignore the build number when determining if the current firmware is
4022 * "too new" to load the driver
4023 */
4024 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4025 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4026 "version, unable to load driver\n",
4027 VXGE_DRIVER_NAME);
4028 return -EINVAL;
4029 }
4030
4031 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4032 * work with this driver.
4033 */
4034 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4035 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4036 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4037 return -EINVAL;
4038 }
4039
 4040	/* No firmware file was specified: determine whether the gPXE image is needed */
4041 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4042 int i;
4043 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4044 if (vdev->devh->eprom_versions[i]) {
4045 gpxe = 1;
4046 break;
4047 }
4048 }
4049 if (gpxe)
4050 fw_name = "vxge/X3fw-pxe.ncf";
4051 else
4052 fw_name = "vxge/X3fw.ncf";
4053
4054 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4055 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4056 * probe, so ignore them
4057 */
4058 if (ret != -EINVAL && ret != -ENOENT)
4059 return -EIO;
4060 else
4061 ret = 0;
4062
4063 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4064 VXGE_FW_VER(maj, min, 0)) {
4065 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4066 " be used with this driver.\n"
4067 "Please get the latest version from "
4068 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4069 VXGE_DRIVER_NAME, maj, min, bld);
4070 return -EINVAL;
4071 }
4072
4073 return ret;
4074}
4075
703da5a1
RV
4076/**
4077 * vxge_probe
4078 * @pdev : structure containing the PCI related information of the device.
4079 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4080 * Description:
4081 * This function is called when a new PCI device gets detected and initializes
4082 * it.
4083 * Return value:
4084 * returns 0 on success and negative on failure.
4085 *
4086 */
4087static int __devinit
4088vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4089{
4090 struct __vxge_hw_device *hldev;
4091 enum vxge_hw_status status;
4092 int ret;
4093 int high_dma = 0;
4094 u64 vpath_mask = 0;
4095 struct vxgedev *vdev;
7dad171c 4096 struct vxge_config *ll_config = NULL;
703da5a1
RV
4097 struct vxge_hw_device_config *device_config = NULL;
4098 struct vxge_hw_device_attr attr;
4099 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4100 u8 *macaddr;
4101 struct vxge_mac_addrs *entry;
4102 static int bus = -1, device = -1;
cb27ec60 4103 u32 host_type;
703da5a1 4104 u8 new_device = 0;
cb27ec60
SH
4105 enum vxge_hw_status is_privileged;
4106 u32 function_mode;
4107 u32 num_vfs = 0;
703da5a1
RV
4108
4109 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4110 attr.pdev = pdev;
4111
cb27ec60
SH
4112 /* In SRIOV-17 mode, functions of the same adapter
4113 * can be deployed on different buses */
4114 if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
4115 (device != PCI_SLOT(pdev->devfn))))
703da5a1
RV
4116 new_device = 1;
4117
4118 bus = pdev->bus->number;
4119 device = PCI_SLOT(pdev->devfn);
4120
4121 if (new_device) {
4122 if (driver_config->config_dev_cnt &&
4123 (driver_config->config_dev_cnt !=
4124 driver_config->total_dev_cnt))
4125 vxge_debug_init(VXGE_ERR,
4126 "%s: Configured %d of %d devices",
4127 VXGE_DRIVER_NAME,
4128 driver_config->config_dev_cnt,
4129 driver_config->total_dev_cnt);
4130 driver_config->config_dev_cnt = 0;
4131 driver_config->total_dev_cnt = 0;
703da5a1 4132 }
9002397e
SH
4133 /* Now making the CPU based no of vpath calculation
4134 * applicable for individual functions as well.
4135 */
4136 driver_config->g_no_cpus = 0;
657205bd
SH
4137 driver_config->vpath_per_dev = max_config_vpath;
4138
703da5a1
RV
4139 driver_config->total_dev_cnt++;
4140 if (++driver_config->config_dev_cnt > max_config_dev) {
4141 ret = 0;
4142 goto _exit0;
4143 }
4144
4145 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4146 GFP_KERNEL);
4147 if (!device_config) {
4148 ret = -ENOMEM;
4149 vxge_debug_init(VXGE_ERR,
4150 "device_config : malloc failed %s %d",
4151 __FILE__, __LINE__);
4152 goto _exit0;
4153 }
4154
7dad171c
PB
4155 ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
4156 if (!ll_config) {
4157 ret = -ENOMEM;
4158 vxge_debug_init(VXGE_ERR,
4159 "ll_config : malloc failed %s %d",
4160 __FILE__, __LINE__);
4161 goto _exit0;
4162 }
4163 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4164 ll_config->intr_type = MSI_X;
4165 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4166 ll_config->rth_steering = RTH_STEERING;
703da5a1
RV
4167
4168 /* get the default configuration parameters */
4169 vxge_hw_device_config_default_get(device_config);
4170
4171 /* initialize configuration parameters */
7dad171c 4172 vxge_device_config_init(device_config, &ll_config->intr_type);
703da5a1
RV
4173
4174 ret = pci_enable_device(pdev);
4175 if (ret) {
4176 vxge_debug_init(VXGE_ERR,
4177 "%s : can not enable PCI device", __func__);
4178 goto _exit0;
4179 }
4180
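	/* Prefer a 64-bit DMA mask and fall back to 32-bit; high_dma is later
	 * handed to vxge_device_register(), presumably so the netdev can
	 * advertise high-memory DMA support.
	 */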
b3837cec 4181 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
703da5a1
RV
4182 vxge_debug_ll_config(VXGE_TRACE,
4183 "%s : using 64bit DMA", __func__);
4184
4185 high_dma = 1;
4186
4187 if (pci_set_consistent_dma_mask(pdev,
b3837cec 4188 DMA_BIT_MASK(64))) {
703da5a1
RV
4189 vxge_debug_init(VXGE_ERR,
4190 "%s : unable to obtain 64bit DMA for "
4191 "consistent allocations", __func__);
4192 ret = -ENOMEM;
4193 goto _exit1;
4194 }
b3837cec 4195 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
703da5a1
RV
4196 vxge_debug_ll_config(VXGE_TRACE,
4197 "%s : using 32bit DMA", __func__);
4198 } else {
4199 ret = -ENOMEM;
4200 goto _exit1;
4201 }
4202
4203 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
4204 vxge_debug_init(VXGE_ERR,
4205 "%s : request regions failed", __func__);
4206 ret = -ENODEV;
4207 goto _exit1;
4208 }
4209
4210 pci_set_master(pdev);
4211
4212 attr.bar0 = pci_ioremap_bar(pdev, 0);
4213 if (!attr.bar0) {
4214 vxge_debug_init(VXGE_ERR,
4215 "%s : cannot remap io memory bar0", __func__);
4216 ret = -ENODEV;
4217 goto _exit2;
4218 }
4219 vxge_debug_ll_config(VXGE_TRACE,
4220 "pci ioremap bar0: %p:0x%llx",
4221 attr.bar0,
4222 (unsigned long long)pci_resource_start(pdev, 0));
4223
703da5a1 4224 status = vxge_hw_device_hw_info_get(attr.bar0,
7dad171c 4225 &ll_config->device_hw_info);
703da5a1
RV
4226 if (status != VXGE_HW_OK) {
4227 vxge_debug_init(VXGE_ERR,
4228 "%s: Reading of hardware info failed."
4229 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4230 ret = -EINVAL;
7975d1ee 4231 goto _exit3;
703da5a1
RV
4232 }
4233
7dad171c 4234 vpath_mask = ll_config->device_hw_info.vpath_mask;
703da5a1
RV
4235 if (vpath_mask == 0) {
4236 vxge_debug_ll_config(VXGE_TRACE,
4237 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4238 ret = -EINVAL;
7975d1ee 4239 goto _exit3;
703da5a1
RV
4240 }
4241
4242 vxge_debug_ll_config(VXGE_TRACE,
4243 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4244 (unsigned long long)vpath_mask);
4245
7dad171c
PB
4246 function_mode = ll_config->device_hw_info.function_mode;
4247 host_type = ll_config->device_hw_info.host_type;
cb27ec60 4248 is_privileged = __vxge_hw_device_is_privilaged(host_type,
7dad171c 4249 ll_config->device_hw_info.func_id);
cb27ec60 4250
703da5a1
RV
4251 /* Check how many vpaths are available */
4252 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4253 if (!((vpath_mask) & vxge_mBIT(i)))
4254 continue;
4255 max_vpath_supported++;
4256 }
4257
cb27ec60
SH
4258 if (new_device)
4259 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4260
5dbc9011 4261 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
cb27ec60 4262 if (is_sriov(function_mode) && (max_config_dev > 1) &&
7dad171c 4263 (ll_config->intr_type != INTA) &&
cb27ec60
SH
4264 (is_privileged == VXGE_HW_OK)) {
4265 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4266 ? (max_config_dev - 1) : num_vfs);
4267 if (ret)
4268 vxge_debug_ll_config(VXGE_ERR,
4269 "Failed in enabling SRIOV mode: %d\n", ret);
5dbc9011
SS
4270 }
4271
703da5a1
RV
4272 /*
4273 * Configure vpaths and get driver configured number of vpaths
4274 * which is less than or equal to the maximum vpaths per function.
4275 */
7dad171c 4276 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
703da5a1
RV
4277 if (!no_of_vpath) {
4278 vxge_debug_ll_config(VXGE_ERR,
4279 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4280 ret = 0;
7975d1ee 4281 goto _exit3;
703da5a1
RV
4282 }
4283
4284 /* Setting driver callbacks */
4285 attr.uld_callbacks.link_up = vxge_callback_link_up;
4286 attr.uld_callbacks.link_down = vxge_callback_link_down;
4287 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4288
4289 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4290 if (status != VXGE_HW_OK) {
4291 vxge_debug_init(VXGE_ERR,
4292 "Failed to initialize device (%d)", status);
4293 ret = -EINVAL;
7975d1ee 4294 goto _exit3;
703da5a1
RV
4295 }
4296
e8ac1756
JM
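	/* Firmware new enough to carry expansion ROM (gPXE) images: record
	 * each EPROM image version so vxge_probe_fw_update() can pick the
	 * matching firmware file.
	 */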
4297 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4298 ll_config->device_hw_info.fw_version.minor,
4299 ll_config->device_hw_info.fw_version.build) >=
4300 VXGE_EPROM_FW_VER) {
4301 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4302
4303 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4304 if (status != VXGE_HW_OK) {
4305 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4306 VXGE_DRIVER_NAME);
4307 /* This is a non-fatal error, continue */
4308 }
4309
4310 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4311 hldev->eprom_versions[i] = img[i].version;
4312 if (!img[i].is_valid)
4313 break;
4314 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4315 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
4316 VXGE_EPROM_IMG_MAJOR(img[i].version),
4317 VXGE_EPROM_IMG_MINOR(img[i].version),
4318 VXGE_EPROM_IMG_FIX(img[i].version),
4319 VXGE_EPROM_IMG_BUILD(img[i].version));
4320 }
4321 }
4322
fa41fd10
SH
4323 /* if FCS stripping is not disabled in MAC fail driver load */
4324 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
4325 vxge_debug_init(VXGE_ERR,
4326 "%s: FCS stripping is not disabled in MAC"
4327 " failing driver load", VXGE_DRIVER_NAME);
4328 ret = -EINVAL;
4329 goto _exit4;
4330 }
4331
703da5a1
RV
4332 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4333
4334 /* set private device info */
4335 pci_set_drvdata(pdev, hldev);
4336
7dad171c
PB
4337 ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4338 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4339 ll_config->addr_learn_en = addr_learn_en;
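	/* Default receive-side steering: Jenkins hash keyed on the TCP/IPv4
	 * 4-tuple only; the remaining hash inputs stay disabled.
	 */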
4340 ll_config->rth_algorithm = RTH_ALG_JENKINS;
47f01db4
JM
4341 ll_config->rth_hash_type_tcpipv4 = 1;
4342 ll_config->rth_hash_type_ipv4 = 0;
4343 ll_config->rth_hash_type_tcpipv6 = 0;
4344 ll_config->rth_hash_type_ipv6 = 0;
4345 ll_config->rth_hash_type_tcpipv6ex = 0;
4346 ll_config->rth_hash_type_ipv6ex = 0;
7dad171c
PB
4347 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4348 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4349 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4350
e8ac1756
JM
4351 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4352 &vdev);
4353 if (ret) {
703da5a1 4354 ret = -EINVAL;
7975d1ee 4355 goto _exit4;
703da5a1
RV
4356 }
4357
e8ac1756
JM
4358 ret = vxge_probe_fw_update(vdev);
4359 if (ret)
4360 goto _exit5;
4361
703da5a1
RV
4362 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4363 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4364 vxge_hw_device_trace_level_get(hldev));
4365
4366 /* set private HW device info */
703da5a1
RV
4367 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4368 vdev->bar0 = attr.bar0;
703da5a1
RV
4369 vdev->max_vpath_supported = max_vpath_supported;
4370 vdev->no_of_vpath = no_of_vpath;
4371
4372 /* Virtual Path count */
4373 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4374 if (!vxge_bVALn(vpath_mask, i, 1))
4375 continue;
4376 if (j >= vdev->no_of_vpath)
4377 break;
4378
4379 vdev->vpaths[j].is_configured = 1;
4380 vdev->vpaths[j].device_id = i;
703da5a1
RV
4381 vdev->vpaths[j].ring.driver_id = j;
4382 vdev->vpaths[j].vdev = vdev;
4383 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4384 memcpy((u8 *)vdev->vpaths[j].macaddr,
7dad171c 4385 ll_config->device_hw_info.mac_addrs[i],
703da5a1
RV
4386 ETH_ALEN);
4387
4388 /* Initialize the mac address list header */
4389 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4390
4391 vdev->vpaths[j].mac_addr_cnt = 0;
4392 vdev->vpaths[j].mcast_addr_cnt = 0;
4393 j++;
4394 }
4395 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4396 vdev->max_config_port = max_config_port;
4397
4398 vdev->vlan_tag_strip = vlan_tag_strip;
4399
4400 /* map the hashing selector table to the configured vpaths */
4401 for (i = 0; i < vdev->no_of_vpath; i++)
4402 vdev->vpath_selector[i] = vpath_selector[i];
4403
4404 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4405
7dad171c
PB
4406 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4407 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4408 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
703da5a1
RV
4409
4410 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
7dad171c 4411 vdev->ndev->name, ll_config->device_hw_info.serial_number);
703da5a1
RV
4412
4413 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
7dad171c 4414 vdev->ndev->name, ll_config->device_hw_info.part_number);
703da5a1
RV
4415
4416 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
7dad171c 4417 vdev->ndev->name, ll_config->device_hw_info.product_desc);
703da5a1 4418
bf54e736 4419 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4420 vdev->ndev->name, macaddr);
703da5a1
RV
4421
4422 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4423 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4424
4425 vxge_debug_init(VXGE_TRACE,
4426 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
7dad171c
PB
4427 ll_config->device_hw_info.fw_version.version,
4428 ll_config->device_hw_info.fw_date.date);
703da5a1 4429
0a25bdc6 4430 if (new_device) {
7dad171c 4431 switch (ll_config->device_hw_info.function_mode) {
0a25bdc6
SH
4432 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4433 vxge_debug_init(VXGE_TRACE,
4434 "%s: Single Function Mode Enabled", vdev->ndev->name);
4435 break;
4436 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4437 vxge_debug_init(VXGE_TRACE,
4438 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4439 break;
4440 case VXGE_HW_FUNCTION_MODE_SRIOV:
4441 vxge_debug_init(VXGE_TRACE,
4442 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4443 break;
4444 case VXGE_HW_FUNCTION_MODE_MRIOV:
4445 vxge_debug_init(VXGE_TRACE,
4446 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4447 break;
4448 }
4449 }
4450
703da5a1
RV
4451 vxge_print_parm(vdev, vpath_mask);
4452
 4453	/* Store the fw version for ethtool option */
7dad171c 4454 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
703da5a1
RV
4455 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4456 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4457
4458 /* Copy the station mac address to the list */
4459 for (i = 0; i < vdev->no_of_vpath; i++) {
4460 entry = (struct vxge_mac_addrs *)
4461 kzalloc(sizeof(struct vxge_mac_addrs),
4462 GFP_KERNEL);
4463 if (NULL == entry) {
4464 vxge_debug_init(VXGE_ERR,
4465 "%s: mac_addr_list : memory allocation failed",
4466 vdev->ndev->name);
4467 ret = -EPERM;
e8ac1756 4468 goto _exit6;
703da5a1
RV
4469 }
4470 macaddr = (u8 *)&entry->macaddr;
4471 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4472 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4473 vdev->vpaths[i].mac_addr_cnt = 1;
4474 }
4475
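	/* device_config was only needed to configure and initialize the HW
	 * device; it can be freed now.
	 */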
914d0d71 4476 kfree(device_config);
eb5f10c2
SH
4477
4478 /*
4479 * INTA is shared in multi-function mode. This is unlike the INTA
4480 * implementation in MR mode, where each VH has its own INTA message.
4481 * - INTA is masked (disabled) as long as at least one function sets
4482 * its TITAN_MASK_ALL_INT.ALARM bit.
4483 * - INTA is unmasked (enabled) when all enabled functions have cleared
4484 * their own TITAN_MASK_ALL_INT.ALARM bit.
4485 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4486 * Though this driver leaves the top level interrupts unmasked while
4487 * leaving the required module interrupt bits masked on exit, there
 4488	 * could be a rogue driver around that does not follow this procedure
4489 * resulting in a failure to generate interrupts. The following code is
4490 * present to prevent such a failure.
4491 */
4492
7dad171c 4493 if (ll_config->device_hw_info.function_mode ==
eb5f10c2
SH
4494 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4495 if (vdev->config.intr_type == INTA)
4496 vxge_hw_device_unmask_all(hldev);
4497
703da5a1
RV
4498 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4499 vdev->ndev->name, __func__, __LINE__);
4500
4501 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4502 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4503 vxge_hw_device_trace_level_get(hldev));
4504
7dad171c 4505 kfree(ll_config);
703da5a1
RV
4506 return 0;
4507
e8ac1756 4508_exit6:
703da5a1
RV
4509 for (i = 0; i < vdev->no_of_vpath; i++)
4510 vxge_free_mac_add_list(&vdev->vpaths[i]);
e8ac1756 4511_exit5:
703da5a1 4512 vxge_device_unregister(hldev);
7975d1ee 4513_exit4:
5dbc9011 4514 pci_disable_sriov(pdev);
703da5a1 4515 vxge_hw_device_terminate(hldev);
703da5a1
RV
4516_exit3:
4517 iounmap(attr.bar0);
4518_exit2:
4519 pci_release_regions(pdev);
4520_exit1:
4521 pci_disable_device(pdev);
4522_exit0:
7dad171c 4523 kfree(ll_config);
703da5a1
RV
4524 kfree(device_config);
4525 driver_config->config_dev_cnt--;
4526 pci_set_drvdata(pdev, NULL);
4527 return ret;
4528}
4529
4530/**
 4531 * vxge_remove - Free the PCI device
 4532 * @pdev: structure containing the PCI related information of the device.
 4533 * Description: This function is called by the PCI subsystem to release a
 4534 * PCI device and free up all resources held by the device.
4535 */
4536static void __devexit
4537vxge_remove(struct pci_dev *pdev)
4538{
4539 struct __vxge_hw_device *hldev;
4540 struct vxgedev *vdev = NULL;
4541 struct net_device *dev;
4542 int i = 0;
4543#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4544 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4545 u32 level_trace;
4546#endif
4547
4548 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4549
4550 if (hldev == NULL)
4551 return;
4552 dev = hldev->ndev;
4553 vdev = netdev_priv(dev);
4554
4555#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4556 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4557 level_trace = vdev->level_trace;
4558#endif
4559 vxge_debug_entryexit(level_trace,
4560 "%s:%d", __func__, __LINE__);
4561
4562 vxge_debug_init(level_trace,
4563 "%s : removing PCI device...", __func__);
4564 vxge_device_unregister(hldev);
4565
4566 for (i = 0; i < vdev->no_of_vpath; i++) {
4567 vxge_free_mac_add_list(&vdev->vpaths[i]);
4568 vdev->vpaths[i].mcast_addr_cnt = 0;
4569 vdev->vpaths[i].mac_addr_cnt = 0;
4570 }
4571
4572 kfree(vdev->vpaths);
4573
4574 iounmap(vdev->bar0);
703da5a1 4575
5dbc9011
SS
4576 pci_disable_sriov(pdev);
4577
703da5a1
RV
4578 /* we are safe to free it now */
4579 free_netdev(dev);
4580
4581 vxge_debug_init(level_trace,
4582 "%s:%d Device unregistered", __func__, __LINE__);
4583
4584 vxge_hw_device_terminate(hldev);
4585
4586 pci_disable_device(pdev);
4587 pci_release_regions(pdev);
4588 pci_set_drvdata(pdev, NULL);
4589 vxge_debug_entryexit(level_trace,
4590 "%s:%d Exiting...", __func__, __LINE__);
4591}
4592
4593static struct pci_error_handlers vxge_err_handler = {
4594 .error_detected = vxge_io_error_detected,
4595 .slot_reset = vxge_io_slot_reset,
4596 .resume = vxge_io_resume,
4597};
4598
4599static struct pci_driver vxge_driver = {
4600 .name = VXGE_DRIVER_NAME,
4601 .id_table = vxge_id_table,
4602 .probe = vxge_probe,
4603 .remove = __devexit_p(vxge_remove),
4604#ifdef CONFIG_PM
4605 .suspend = vxge_pm_suspend,
4606 .resume = vxge_pm_resume,
4607#endif
4608 .err_handler = &vxge_err_handler,
4609};
4610
4611static int __init
4612vxge_starter(void)
4613{
4614 int ret = 0;
703da5a1 4615
75f5e1c6
JP
4616 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4617 pr_info("Driver version: %s\n", DRV_VERSION);
703da5a1
RV
4618
4619 verify_bandwidth();
4620
4621 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4622 if (!driver_config)
4623 return -ENOMEM;
4624
4625 ret = pci_register_driver(&vxge_driver);
4626
4627 if (driver_config->config_dev_cnt &&
4628 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4629 vxge_debug_init(VXGE_ERR,
4630 "%s: Configured %d of %d devices",
4631 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4632 driver_config->total_dev_cnt);
4633
4634 if (ret)
4635 kfree(driver_config);
4636
4637 return ret;
4638}
4639
4640static void __exit
4641vxge_closer(void)
4642{
4643 pci_unregister_driver(&vxge_driver);
4644 kfree(driver_config);
4645}
4646module_init(vxge_starter);
4647module_exit(vxge_closer);