/******************************************************************************
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
* Drivers based on or derived from this code fall under the GPL and must
* retain the authorship, copyright and license notice. This file is not
* a complete program and may only be used when the entire operating
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
*
* vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
*              Virtualized Server Adapter.
* Copyright(c) 2002-2009 Neterion Inc.
*
* The module loadable parameters that are supported by the driver and a brief
* explanation of all the variables:
* vlan_tag_strip:
*	Strip VLAN Tag enable/disable. Instructs the device to remove
*	the VLAN tag from all received tagged frames that are not
*	replicated at the internal L2 switch.
*		0 - Do not strip the VLAN tag.
*		1 - Strip the VLAN tag.
*
* addr_learn_en:
*	Enable learning the mac address of the guest OS interface in
*	a virtualization environment.
*		0 - DISABLE
*		1 - ENABLE
*
* max_config_port:
*	Maximum number of ports to be supported.
*		MIN - 1 and MAX - 2
*
* max_config_vpath:
*	This configures the maximum number of VPATHs configured for each
*	device function.
*		MIN - 1 and MAX - 17
*
* max_config_dev:
*	This configures the maximum number of device functions to be enabled.
*		MIN - 1 and MAX - 17
*
******************************************************************************/

#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static struct pci_device_id vxge_id_table[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

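/*
 * Each vpath_selector[] entry is a power-of-two-minus-one mask: with n
 * vpaths configured, a flow hash is ANDed with vpath_selector[n - 1] to
 * pick a transmit vpath (see vxge_get_vpath_no()). bw_percentage[]
 * defaults every vpath to 0xFF and may be overridden per-vpath on the
 * module command line.
 */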
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

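/*
 * Completed Tx skbs are chained through their cb[] area: poll_tx() hands
 * back the head of a singly linked list and each skb's cb holds the
 * pointer to the next one, so the whole batch is freed without any
 * additional allocation.
 */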
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	unsigned long flags = 0;
	struct sk_buff *skb_ptr = NULL;
	struct sk_buff **temp, *head, *skb;

	if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
		vxge_hw_vpath_poll_tx(fifo->handle, (void **)&skb_ptr);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
	}
	/* free SKBs */
	head = skb_ptr;
	while (head) {
		skb = head;
		temp = (struct sk_buff **)&skb->cb;
		head = *temp;
		*temp = NULL;
		dev_kfree_skb_irq(skb);
	}
}

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * MultiQ manipulation helper functions
 */
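/*
 * With TX_MULTIQ_STEERING every fifo maps to its own netdev tx queue;
 * in the other steering modes all fifos share queue 0, so the per-fifo
 * queue_state flag shadows whether that fifo considers the queue stopped.
 */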
void vxge_stop_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(dev);
}

void vxge_stop_tx_queue(struct vxge_fifo *fifo)
{
	struct net_device *dev = fifo->ndev;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
		txq = netdev_get_tx_queue(dev, fifo->driver_id);
	else {
		txq = netdev_get_tx_queue(dev, 0);
		fifo->queue_state = VPATH_QUEUE_STOP;
	}

	netif_tx_stop_queue(txq);
}

void vxge_start_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_start_all_queues(dev);
}

static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_wake_all_queues(dev);
}

void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
{
	struct net_device *dev = fifo->ndev;
	int vpath_no = fifo->driver_id;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
		txq = netdev_get_tx_queue(dev, vpath_no);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	} else {
		txq = netdev_get_tx_queue(dev, 0);
		if (fifo->queue_state == VPATH_QUEUE_STOP)
			if (netif_tx_queue_stopped(txq)) {
				fifo->queue_state = VPATH_QUEUE_START;
				netif_tx_wake_queue(txq);
			}
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	vxge_wake_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	vxge_stop_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
			VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	dma_addr = pci_map_single(ring->pdev, rx_priv->skb->data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(ring->napi_p, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(ring->napi_p, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

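/*
 * Descriptors are returned to the ring in batches: every
 * VXGE_HW_RXSYNC_FREQ_CNT descriptors the accumulated batch is published
 * with a write memory barrier (rxd_post_post_wmb) and a new batch is
 * started, amortizing the barrier cost over several RxDs.
 */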
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh, 0);

	do {
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {

			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

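		/*
		 * Copy-break: frames larger than VXGE_LL_RX_COPY_THRESHOLD
		 * are handed up in their original buffer and a replacement
		 * is allocated for the descriptor; smaller frames are copied
		 * into a fresh skb so the original buffer can be reposted.
		 */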
		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {

			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {

				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	dev->last_rx = jiffies;

	vxge_debug_entryexit(VXGE_TRACE,
				"%s:%d Exiting...",
				__func__, __LINE__);
	return VXGE_HW_OK;
}

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NICs
 * internal memory.
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		void **skb_ptr)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, *head = NULL;
	struct sk_buff **temp;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
			"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d fifo_hw = %p dtr = %p "
				"tcode = 0x%x", fifo->ndev->name, __func__,
				__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

		temp = (struct sk_buff **)&skb->cb;
		*temp = head;
		head = skb;

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	vxge_wake_tx_queue(fifo, skb);

	if (skb_ptr)
		*skb_ptr = (void *) head;

	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d Exiting...",
				fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet */
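/*
 * TX_PORT_STEERING hashes on the TCP source and destination ports, masked
 * down to the configured vpath count through vpath_selector[]. For UDP
 * (when NETIF_F_LLTX is available) the caller is told not to spin on the
 * fifo lock and falls back to NETDEV_TX_LOCKED if the trylock fails.
 */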
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
			int *do_lock)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;

			if (ip->protocol == IPPROTO_UDP) {
#ifdef NETIF_F_LLTX
				*do_lock = 0;
#endif
			}
		}
	}
	return counter;
}

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}

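/*
 * Learn the source MAC seen on transmit: if it is already listed on some
 * vpath, reuse that vpath; if any vpath's DA table has room, add it there;
 * otherwise program vpath 0 as the "catch-basin" through the
 * rts_mgr_cbasin_cfg register and track the address in software only.
 */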
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when the device can't queue the packet, just the trans_start
 * variable will not be updated.
 */
static int
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	unsigned long flags = 0;
	int vpath_no = 0;
	int do_spin_tx_lock = 1;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no = %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (do_spin_tx_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
		if (netif_subqueue_stopped(dev, skb)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors.", dev->name);
		fifo->stats.txd_out_of_desc++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		vxge_stop_tx_queue(fifo);
		fifo->stats.pci_map_fail++;
		goto _exit2;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txdl_priv = %p "
			"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
			__func__, __LINE__, skb, txdl_priv,
			frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer =
			(u64)pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit0;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
				dev->name, __func__, __LINE__, i,
				(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {

		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);
#ifdef NETIF_F_LLTX
	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	VXGE_COMPLETE_VPATH_TX(fifo);
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

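/*
 * Error unwind: dma_buffers[0] always holds the linear (headlen) mapping
 * and subsequent slots hold the fragment mappings, so _exit1 unmaps the
 * header first and then every fragment that was mapped before the failure.
 */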
_exit0:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);

_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit2:
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	VXGE_COMPLETE_VPATH_TX(fifo);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine whether multicast addressing must be enabled or whether
 * promiscuous mode is to be disabled, etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *mclist;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_disable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 0;
		}
	}

	if (status != VXGE_HW_OK)
		vxge_debug_init(VXGE_ERR,
			"failed to %s multicast, status %d",
			dev->flags & IFF_ALLMULTI ?
			"enable" : "disable", status);

	if (!vdev->config.addr_learn_en) {
		if (dev->flags & IFF_PROMISC) {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_enable(
						vdev->vpaths[i].handle);
			}
		} else {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_disable(
						vdev->vpaths[i].handle);
			}
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && dev->mc_count) {

		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((dev->mc_count +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			if (!list_empty(list_head))
				mac_entry = (struct vxge_mac_addrs *)
					list_first_entry(list_head,
						struct vxge_mac_addrs,
						item);

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			i++, mclist = mclist->next) {

			memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0])
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
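/*
 * MSI-X layout as computed below: each vpath owns a block of
 * VXGE_HW_VPATH_MSIX_ACTIVE vectors, of which the first two carry the
 * TIM (tx/rx) interrupts; a single alarm vector, two below the top of
 * the allocated range, is common to all vpaths.
 */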
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id, alarm_msix_id;
	int tim_msix_id[4] = {[0 ...3] = 0};

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		alarm_msix_id =
			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

		tim_msix_id[0] = msix_id;
		tim_msix_id[1] = msix_id + 1;
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
	}
}

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}

/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vdev->vpaths[vp_id].handle) {
		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
				== VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[vp_id].handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);

	return ret;
}

static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			vxge_stop_all_tx_queue(vdev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

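		/*
		 * Triage the recorded critical-error event: recoverable
		 * events (RESET_START, FIFO_ERR, VPATH_ERR) fall through to
		 * the reset below, everything else bails out with -EPERM.
		 */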
		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;

		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		vxge_stop_all_tx_queue(vdev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		vxge_wake_all_tx_queue(vdev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
	return ret;
}

/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
int vxge_reset(struct vxgedev *vdev)
{
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
	return 0;
}

/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
			(struct __vxge_hw_channel *)ring->handle,
			ring->rx_vector_no);
	}

	return ring->pkts_processed;
}

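/*
 * In INTA mode a single NAPI instance covers the whole device: the budget
 * is handed to each per-vpath ring in turn, and interrupts are re-enabled
 * device-wide only when the aggregate work stayed under the budget.
 */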
static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif

/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
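	/*
	 * For example, with rth_bkt_sz = 2 (four buckets) and three vpaths
	 * this yields itable = {0, 1, 2, 3} and mtable = {0, 1, 2, 0}: hash
	 * bucket 3 wraps around to vpath 0.
	 */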
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}

int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}

/* search for a mac address in the DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
	struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {

		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}
1951
1952/* Store all vlan ids from the list to the vid table */
1953enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1954{
1955 enum vxge_hw_status status = VXGE_HW_OK;
1956 struct vxgedev *vdev = vpath->vdev;
1957 u16 vid;
1958
1959 if (vdev->vlgrp && vpath->is_open) {
1960
1961 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1962 if (!vlan_group_get_device(vdev->vlgrp, vid))
1963 continue;
1964 /* Add this vlan to the vid table */
1965 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1966 }
1967 }
1968
1969 return status;
1970}
1971
1972/* Store all mac addresses from the list to the DA table */
1973enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1974{
1975 enum vxge_hw_status status = VXGE_HW_OK;
1976 struct macInfo mac_info;
1977 u8 *mac_address = NULL;
1978 struct list_head *entry, *next;
1979
1980 memset(&mac_info, 0, sizeof(struct macInfo));
1981
1982 if (vpath->is_open) {
1983
1984 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1985 mac_address =
1986 (u8 *)&
1987 ((struct vxge_mac_addrs *)entry)->macaddr;
1988 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1989 ((struct vxge_mac_addrs *)entry)->state =
1990 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1991 /* does this mac address already exist in da table? */
1992 status = vxge_search_mac_addr_in_da_table(vpath,
1993 &mac_info);
1994 if (status != VXGE_HW_OK) {
1995 /* Add this mac address to the DA table */
1996 status = vxge_hw_vpath_mac_addr_add(
1997 vpath->handle, mac_info.macaddr,
1998 mac_info.macmask,
1999 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
2000 if (status != VXGE_HW_OK) {
2001 vxge_debug_init(VXGE_ERR,
2002 "DA add entry failed for vpath:%d",
2003 vpath->device_id);
2004 ((struct vxge_mac_addrs *)entry)->state
2005 = VXGE_LL_MAC_ADDR_IN_LIST;
2006 }
2007 }
2008 }
2009 }
2010
2011 return status;
2012}
2013
2014/* reset vpaths */
2015enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
2016{
2017 int i;
2018 enum vxge_hw_status status = VXGE_HW_OK;
2019
2020 for (i = 0; i < vdev->no_of_vpath; i++)
2021 if (vdev->vpaths[i].handle) {
2022 if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
2023 == VXGE_HW_OK) {
2024 if (is_vxge_card_up(vdev) &&
2025 vxge_hw_vpath_recover_from_reset(
2026 vdev->vpaths[i].handle)
2027 != VXGE_HW_OK) {
2028 vxge_debug_init(VXGE_ERR,
2029 "vxge_hw_vpath_recover_"
2030 "from_reset failed for vpath: "
2031 "%d", i);
2032 return status;
2033 }
2034 } else {
2035 vxge_debug_init(VXGE_ERR,
2036 "vxge_hw_vpath_reset failed for "
2037 "vpath:%d", i);
2038 return status;
2039 }
2040 }
2041 return status;
2042}
2043
2044/* close vpaths */
2045void vxge_close_vpaths(struct vxgedev *vdev, int index)
2046{
2047 int i;
2048 for (i = index; i < vdev->no_of_vpath; i++) {
2049 if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
2050 vxge_hw_vpath_close(vdev->vpaths[i].handle);
2051 vdev->stats.vpaths_open--;
2052 }
2053 vdev->vpaths[i].is_open = 0;
2054 vdev->vpaths[i].handle = NULL;
2055 }
2056}
2057
2058/* open vpaths */
2059int vxge_open_vpaths(struct vxgedev *vdev)
2060{
2061 enum vxge_hw_status status;
2062 int i;
2063 u32 vp_id = 0;
2064 struct vxge_hw_vpath_attr attr;
2065
2066 for (i = 0; i < vdev->no_of_vpath; i++) {
2067 vxge_assert(vdev->vpaths[i].is_configured);
2068 attr.vp_id = vdev->vpaths[i].device_id;
2069 attr.fifo_attr.callback = vxge_xmit_compl;
2070 attr.fifo_attr.txdl_term = vxge_tx_term;
2071 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2072 attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;
2073
2074 attr.ring_attr.callback = vxge_rx_1b_compl;
2075 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2076 attr.ring_attr.rxd_term = vxge_rx_term;
2077 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2078 attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;
2079
2080 vdev->vpaths[i].ring.ndev = vdev->ndev;
2081 vdev->vpaths[i].ring.pdev = vdev->pdev;
2082 status = vxge_hw_vpath_open(vdev->devh, &attr,
2083 &(vdev->vpaths[i].handle));
2084 if (status == VXGE_HW_OK) {
2085 vdev->vpaths[i].fifo.handle =
2086 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2087 vdev->vpaths[i].ring.handle =
2088 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2089 vdev->vpaths[i].fifo.tx_steering_type =
2090 vdev->config.tx_steering_type;
2091 vdev->vpaths[i].fifo.ndev = vdev->ndev;
2092 vdev->vpaths[i].fifo.pdev = vdev->pdev;
2093 vdev->vpaths[i].fifo.indicate_max_pkts =
2094 vdev->config.fifo_indicate_max_pkts;
2095 vdev->vpaths[i].ring.rx_vector_no = 0;
2096 vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
2097 vdev->vpaths[i].is_open = 1;
2098 vdev->vp_handles[i] = vdev->vpaths[i].handle;
2099 vdev->vpaths[i].ring.gro_enable =
2100 vdev->config.gro_enable;
2101 vdev->vpaths[i].ring.vlan_tag_strip =
2102 vdev->vlan_tag_strip;
2103 vdev->stats.vpaths_open++;
2104 } else {
2105 vdev->stats.vpath_open_fail++;
2106 vxge_debug_init(VXGE_ERR,
2107 "%s: vpath: %d failed to open "
2108 "with status: %d",
2109 vdev->ndev->name, vdev->vpaths[i].device_id,
2110 status);
2111 vxge_close_vpaths(vdev, 0);
2112 return -EPERM;
2113 }
2114
2115 vp_id =
2116 ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
2117 vpath->vp_id;
2118 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2119 }
2120 return VXGE_HW_OK;
2121}
2122
2123/*
2124 * vxge_isr_napi
2125 * @irq: the irq of the device.
2126 * @dev_id: a void pointer to the vxgedev structure of the device,
2127 * as passed to request_irq().
2128 *
2129 * This function is the ISR handler of the device when napi is enabled. It
2130 * identifies the reason for the interrupt and calls the relevant service
2131 * routines.
2132 */
2133static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2134{
2135 struct net_device *dev;
2136 struct __vxge_hw_device *hldev;
2137 u64 reason;
2138 enum vxge_hw_status status;
2139 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2140
2141 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2142
2143 dev = vdev->ndev;
2144 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
2145
2146 if (pci_channel_offline(vdev->pdev))
2147 return IRQ_NONE;
2148
2149 if (unlikely(!is_vxge_card_up(vdev)))
2150 return IRQ_NONE;
2151
2152 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
2153 &reason);
2154 if (status == VXGE_HW_OK) {
2155 vxge_hw_device_mask_all(hldev);
2156
2157 if (reason &
2158 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2159 vdev->vpaths_deployed >>
2160 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2161
2162 vxge_hw_device_clear_tx_rx(hldev);
2163 napi_schedule(&vdev->napi);
2164 vxge_debug_intr(VXGE_TRACE,
2165 "%s:%d Exiting...", __func__, __LINE__);
2166 return IRQ_HANDLED;
2167 } else
2168 vxge_hw_device_unmask_all(hldev);
2169 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2170 (status == VXGE_HW_ERR_CRITICAL) ||
2171 (status == VXGE_HW_ERR_FIFO))) {
2172 vxge_hw_device_mask_all(hldev);
2173 vxge_hw_device_flush_io(hldev);
2174 return IRQ_HANDLED;
2175 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2176 return IRQ_HANDLED;
2177
2178 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2179 return IRQ_NONE;
2180}
2181
2182#ifdef CONFIG_PCI_MSI
2183
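/* MSI-X handlers: each vpath owns a Tx (fifo) vector and an Rx (ring)
 * vector; one additional vector services alarms for all vpaths.
 */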
2184static irqreturn_t
2185vxge_tx_msix_handle(int irq, void *dev_id)
2186{
2187 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2188
2189 VXGE_COMPLETE_VPATH_TX(fifo);
2190
2191 return IRQ_HANDLED;
2192}
2193
2194static irqreturn_t
2195vxge_rx_msix_napi_handle(int irq, void *dev_id)
2196{
2197 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2198
2199 /* MSIX_IDX for Rx is 1 */
2200 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2201 ring->rx_vector_no);
2202
2203 napi_schedule(&ring->napi);
2204 return IRQ_HANDLED;
2205}
2206
2207static irqreturn_t
2208vxge_alarm_msix_handle(int irq, void *dev_id)
2209{
2210 int i;
2211 enum vxge_hw_status status;
2212 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2213 struct vxgedev *vdev = vpath->vdev;
2214 int alarm_msix_id =
2215 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2216
2217 for (i = 0; i < vdev->no_of_vpath; i++) {
2218 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
2219 alarm_msix_id);
2220
2221 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2222 vdev->exec_mode);
2223 if (status == VXGE_HW_OK) {
2224
2225 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2226 alarm_msix_id);
2227 continue;
2228 }
2229 vxge_debug_intr(VXGE_ERR,
2230 "%s: vxge_hw_vpath_alarm_process failed %x ",
2231 VXGE_DRIVER_NAME, status);
2232 }
2233 return IRQ_HANDLED;
2234}
2235
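/* Allocate the MSI-X entry tables: two vectors (Tx and Rx) per supported
 * vpath plus one alarm vector. If the host grants fewer vectors than
 * requested, retry with only the vpaths actually in use.
 */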
2236static int vxge_alloc_msix(struct vxgedev *vdev)
2237{
2238 int j, i, ret = 0;
2239 int intr_cnt = 0;
2240 int alarm_msix_id = 0, msix_intr_vect = 0;
2241 vdev->intr_cnt = 0;
2242
2243 /* Tx/Rx MSIX Vectors count */
2244 vdev->intr_cnt = vdev->no_of_vpath * 2;
2245
2246 /* Alarm MSIX Vectors count */
2247 vdev->intr_cnt++;
2248
2249 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2250 vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
2251 GFP_KERNEL);
2252 if (!vdev->entries) {
2253 vxge_debug_init(VXGE_ERR,
2254 "%s: memory allocation failed",
2255 VXGE_DRIVER_NAME);
2256 return -ENOMEM;
2257 }
2258
2259 vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
2260 GFP_KERNEL);
2261 if (!vdev->vxge_entries) {
2262 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2263 VXGE_DRIVER_NAME);
2264 kfree(vdev->entries);
2265 return -ENOMEM;
2266 }
2267
2268 /* Last vector in the list is used for alarm */
2269 alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2270 for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
2271
2272 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2273
2274 /* Initialize the fifo vector */
2275 vdev->entries[j].entry = msix_intr_vect;
2276 vdev->vxge_entries[j].entry = msix_intr_vect;
2277 vdev->vxge_entries[j].in_use = 0;
2278 j++;
2279
2280 /* Initialize the ring vector */
2281 vdev->entries[j].entry = msix_intr_vect + 1;
2282 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2283 vdev->vxge_entries[j].in_use = 0;
2284 j++;
2285 }
2286
2287 /* Initialize the alarm vector */
2288 vdev->entries[j].entry = alarm_msix_id;
2289 vdev->vxge_entries[j].entry = alarm_msix_id;
2290 vdev->vxge_entries[j].in_use = 0;
2291
2292 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2293 /* If the driver's request exceeds the available IRQs, retry
2294 * with a smaller number.
2295 */
2296 if (ret > 0) {
2297 vxge_debug_init(VXGE_ERR,
2298 "%s: MSI-X enable failed for %d vectors, available: %d",
2299 VXGE_DRIVER_NAME, intr_cnt, ret);
2300 vdev->max_vpath_supported = vdev->no_of_vpath;
2301 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2302
2303 /* Reset the alarm vector setting */
2304 vdev->entries[j].entry = 0;
2305 vdev->vxge_entries[j].entry = 0;
2306
2307 /* Initialize the alarm vector with new setting */
2308 vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
2309 vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
2310 vdev->vxge_entries[intr_cnt - 1].in_use = 0;
2311
2312 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2313 if (!ret)
2314 vxge_debug_init(VXGE_ERR,
2315 "%s: MSI-X enabled for %d vectors",
2316 VXGE_DRIVER_NAME, intr_cnt);
2317 }
2318
2319 if (ret) {
2320 vxge_debug_init(VXGE_ERR,
2321 "%s: MSI-X enable failed for %d vectors, ret: %d",
2322 VXGE_DRIVER_NAME, intr_cnt, ret);
2323 kfree(vdev->entries);
2324 kfree(vdev->vxge_entries);
2325 vdev->entries = NULL;
2326 vdev->vxge_entries = NULL;
2327 return -ENODEV;
2328 }
2329 return 0;
2330}
2331
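/* Map each vpath's Tx/Rx interrupts onto its MSI-X vectors and program
 * the shared alarm vector into the hardware.
 */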
2332static int vxge_enable_msix(struct vxgedev *vdev)
2333{
2334
2335 int i, ret = 0;
2336 enum vxge_hw_status status;
2337 /* 0 - Tx, 1 - Rx */
2338 int tim_msix_id[4];
2339 int alarm_msix_id = 0, msix_intr_vect = 0;
2340 vdev->intr_cnt = 0;
2341
2342 /* allocate msix vectors */
2343 ret = vxge_alloc_msix(vdev);
2344 if (!ret) {
2345 /* Last vector in the list is used for alarm */
2346 alarm_msix_id =
2347 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2348 for (i = 0; i < vdev->no_of_vpath; i++) {
2349
2350 /* If the fifo or ring is not enabled,
2351 the MSI-X vector for it should be set to 0.
2352 Hence initializing this array to all 0s.
2353 */
2354 memset(tim_msix_id, 0, sizeof(tim_msix_id));
2355 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2356 tim_msix_id[0] = msix_intr_vect;
2357
2358 tim_msix_id[1] = msix_intr_vect + 1;
2359 vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
2360
2361 status = vxge_hw_vpath_msix_set(
2362 vdev->vpaths[i].handle,
2363 tim_msix_id, alarm_msix_id);
2364 if (status != VXGE_HW_OK) {
2365 vxge_debug_init(VXGE_ERR,
2366 "vxge_hw_vpath_msix_set "
2367 "failed with status : %x", status);
2368 kfree(vdev->entries);
2369 kfree(vdev->vxge_entries);
2370 pci_disable_msix(vdev->pdev);
2371 return -ENODEV;
2372 }
2373 }
2374 }
2375
2376 return ret;
2377}
2378
2379static void vxge_rem_msix_isr(struct vxgedev *vdev)
2380{
2381 int intr_cnt;
2382
2383 for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
2384 intr_cnt++) {
2385 if (vdev->vxge_entries[intr_cnt].in_use) {
2386 synchronize_irq(vdev->entries[intr_cnt].vector);
2387 free_irq(vdev->entries[intr_cnt].vector,
2388 vdev->vxge_entries[intr_cnt].arg);
2389 vdev->vxge_entries[intr_cnt].in_use = 0;
2390 }
2391 }
2392
2393 kfree(vdev->entries);
2394 kfree(vdev->vxge_entries);
2395 vdev->entries = NULL;
2396 vdev->vxge_entries = NULL;
2397
2398 if (vdev->config.intr_type == MSI_X)
2399 pci_disable_msix(vdev->pdev);
2400}
2401#endif
2402
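/* Release whichever interrupt resources vxge_add_isr() acquired:
 * the MSI-X vectors, or the shared INTA line.
 */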
2403static void vxge_rem_isr(struct vxgedev *vdev)
2404{
2405 struct __vxge_hw_device *hldev;
2406 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2407
2408#ifdef CONFIG_PCI_MSI
2409 if (vdev->config.intr_type == MSI_X) {
2410 vxge_rem_msix_isr(vdev);
2411 } else
2412#endif
2413 if (vdev->config.intr_type == INTA) {
2414 synchronize_irq(vdev->pdev->irq);
2415 free_irq(vdev->pdev->irq, vdev);
2416 }
2417}
2418
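/* Register interrupt handlers. MSI-X is attempted first when configured;
 * on failure the driver falls back to a single vpath on a shared INTA line.
 */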
2419static int vxge_add_isr(struct vxgedev *vdev)
2420{
2421 int ret = 0;
2422#ifdef CONFIG_PCI_MSI
2423 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2424 u64 function_mode = vdev->config.device_hw_info.function_mode;
2425 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2426
2427 if (vdev->config.intr_type == MSI_X)
2428 ret = vxge_enable_msix(vdev);
2429
2430 if (ret) {
2431 vxge_debug_init(VXGE_ERR,
2432 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2433 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2434 test_and_set_bit(__VXGE_STATE_CARD_UP,
2435 &driver_config->inta_dev_open))
2436 return VXGE_HW_FAIL;
2437 else {
2438 vxge_debug_init(VXGE_ERR,
2439 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2440 vdev->config.intr_type = INTA;
2441 vxge_hw_device_set_intr_type(vdev->devh,
2442 VXGE_HW_INTR_MODE_IRQLINE);
2443 vxge_close_vpaths(vdev, 1);
2444 vdev->no_of_vpath = 1;
2445 vdev->stats.vpaths_open = 1;
2446 }
2447 }
2448
2449 if (vdev->config.intr_type == MSI_X) {
2450 for (intr_idx = 0;
2451 intr_idx < (vdev->no_of_vpath *
2452 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2453
2454 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2455 irq_req = 0;
2456
2457 switch (msix_idx) {
2458 case 0:
2459 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2460 "%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
2461 vdev->ndev->name, pci_fun, vp_idx,
2462 vdev->entries[intr_cnt].entry);
2463 ret = request_irq(
2464 vdev->entries[intr_cnt].vector,
2465 vxge_tx_msix_handle, 0,
2466 vdev->desc[intr_cnt],
2467 &vdev->vpaths[vp_idx].fifo);
2468 vdev->vxge_entries[intr_cnt].arg =
2469 &vdev->vpaths[vp_idx].fifo;
2470 irq_req = 1;
2471 break;
2472 case 1:
2473 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2474 "%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
2475 vdev->ndev->name, pci_fun, vp_idx,
2476 vdev->entries[intr_cnt].entry);
2477 ret = request_irq(
2478 vdev->entries[intr_cnt].vector,
2479 vxge_rx_msix_napi_handle,
2480 0,
2481 vdev->desc[intr_cnt],
2482 &vdev->vpaths[vp_idx].ring);
2483 vdev->vxge_entries[intr_cnt].arg =
2484 &vdev->vpaths[vp_idx].ring;
2485 irq_req = 1;
2486 break;
2487 }
2488
2489 if (ret) {
2490 vxge_debug_init(VXGE_ERR,
2491 "%s: MSIX - %d Registration failed",
2492 vdev->ndev->name, intr_cnt);
2493 vxge_rem_msix_isr(vdev);
2494 if ((function_mode ==
2495 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2496 test_and_set_bit(__VXGE_STATE_CARD_UP,
2497 &driver_config->inta_dev_open))
2498 return VXGE_HW_FAIL;
2499 else {
2500 vxge_hw_device_set_intr_type(
2501 vdev->devh,
2502 VXGE_HW_INTR_MODE_IRQLINE);
2503 vdev->config.intr_type = INTA;
2504 vxge_debug_init(VXGE_ERR,
2505 "%s: Defaulting to INTA"
2506 , vdev->ndev->name);
2507 vxge_close_vpaths(vdev, 1);
2508 vdev->no_of_vpath = 1;
2509 vdev->stats.vpaths_open = 1;
2510 goto INTA_MODE;
2511 }
2512 }
2513
2514 if (irq_req) {
2515 /* We requested this MSI-X interrupt */
2516 vdev->vxge_entries[intr_cnt].in_use = 1;
2517 vxge_hw_vpath_msix_unmask(
2518 vdev->vpaths[vp_idx].handle,
2519 intr_idx);
2520 intr_cnt++;
2521 }
2522
2523 /* Point to next vpath handler */
2524 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0)
2525 && (vp_idx < (vdev->no_of_vpath - 1)))
2526 vp_idx++;
2527 }
2528
2529 intr_cnt = vdev->max_vpath_supported * 2;
2530 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2531 "%s:vxge Alarm fn: %d MSI-X: %d",
2532 vdev->ndev->name, pci_fun,
2533 vdev->entries[intr_cnt].entry);
2534 /* For Alarm interrupts */
2535 ret = request_irq(vdev->entries[intr_cnt].vector,
2536 vxge_alarm_msix_handle, 0,
2537 vdev->desc[intr_cnt],
2538 &vdev->vpaths[vp_idx]);
2539 if (ret) {
2540 vxge_debug_init(VXGE_ERR,
2541 "%s: MSIX - %d Registration failed",
2542 vdev->ndev->name, intr_cnt);
2543 vxge_rem_msix_isr(vdev);
2544 if ((function_mode ==
2545 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2546 test_and_set_bit(__VXGE_STATE_CARD_UP,
2547 &driver_config->inta_dev_open))
2548 return VXGE_HW_FAIL;
2549 else {
2550 vxge_hw_device_set_intr_type(vdev->devh,
2551 VXGE_HW_INTR_MODE_IRQLINE);
2552 vdev->config.intr_type = INTA;
2553 vxge_debug_init(VXGE_ERR,
2554 "%s: Defaulting to INTA",
2555 vdev->ndev->name);
2556 vxge_close_vpaths(vdev, 1);
2557 vdev->no_of_vpath = 1;
2558 vdev->stats.vpaths_open = 1;
2559 goto INTA_MODE;
2560 }
2561 }
2562
2563 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2564 intr_idx - 2);
2565 vdev->vxge_entries[intr_cnt].in_use = 1;
2566 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
2567 }
2568INTA_MODE:
2569#endif
2570 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2571
2572 if (vdev->config.intr_type == INTA) {
2573 ret = request_irq((int) vdev->pdev->irq,
2574 vxge_isr_napi,
2575 IRQF_SHARED, vdev->desc[0], vdev);
2576 if (ret) {
2577 vxge_debug_init(VXGE_ERR,
2578 "%s %s-%d: ISR registration failed",
2579 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2580 return -ENODEV;
2581 }
2582 vxge_debug_init(VXGE_TRACE,
2583 "new %s-%d line allocated",
2584 "IRQ", vdev->pdev->irq);
2585 }
2586
2587 return VXGE_HW_OK;
2588}
2589
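/* Timer handler: reset any vpaths flagged in the vp_reset bitmap,
 * then re-arm to run again in HZ/2 jiffies.
 */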
2590static void vxge_poll_vp_reset(unsigned long data)
2591{
2592 struct vxgedev *vdev = (struct vxgedev *)data;
2593 int i, j = 0;
2594
2595 for (i = 0; i < vdev->no_of_vpath; i++) {
2596 if (test_bit(i, &vdev->vp_reset)) {
2597 vxge_reset_vpath(vdev, i);
2598 j++;
2599 }
2600 }
2601 if (j && (vdev->config.intr_type != MSI_X)) {
2602 vxge_hw_device_unmask_all(vdev->devh);
2603 vxge_hw_device_flush_io(vdev->devh);
2604 }
2605
2606 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2607}
2608
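/* Timer handler: detect stalled vpaths by comparing Rx frame counts
 * between runs and schedule a reset for any vpath that has stopped
 * receiving for two consecutive checks.
 */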
2609static void vxge_poll_vp_lockup(unsigned long data)
2610{
2611 struct vxgedev *vdev = (struct vxgedev *)data;
2612 int i;
2613 struct vxge_ring *ring;
2614 enum vxge_hw_status status = VXGE_HW_OK;
2615
2616 for (i = 0; i < vdev->no_of_vpath; i++) {
2617 ring = &vdev->vpaths[i].ring;
2618 /* Did this vpath receive any packets? */
2619 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
2620 status = vxge_hw_vpath_check_leak(ring->handle);
2621
2622 /* Did it receive any packets last time? */
2623 if ((VXGE_HW_FAIL == status) &&
2624 (VXGE_HW_FAIL == ring->last_status)) {
2625
2626 /* schedule vpath reset */
2627 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2628
2629 /* disable interrupts for this vpath */
2630 vxge_vpath_intr_disable(vdev, i);
2631
2632 /* stop the queue for this vpath */
2633 vxge_stop_tx_queue(&vdev->vpaths[i].
2634 fifo);
2635 continue;
2636 }
2637 }
2638 }
2639 ring->stats.prev_rx_frms = ring->stats.rx_frms;
2640 ring->last_status = status;
2641 }
2642
2643 /* Check every millisecond */
2644 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2645}
2646
2647/**
2648 * vxge_open
2649 * @dev: pointer to the device structure.
2650 *
2651 * This function is the open entry point of the driver. It mainly calls a
2652 * function to allocate Rx buffers and inserts them into the buffer
2653 * descriptors and then enables the Rx part of the NIC.
2654 * Return value: '0' on success and an appropriate negative errno
2655 * value on failure.
2656 */
2657int
2658vxge_open(struct net_device *dev)
2659{
2660 enum vxge_hw_status status;
2661 struct vxgedev *vdev;
2662 struct __vxge_hw_device *hldev;
2663 int ret = 0;
2664 int i;
2665 u64 val64, function_mode;
2666 vxge_debug_entryexit(VXGE_TRACE,
2667 "%s: %s:%d", dev->name, __func__, __LINE__);
2668
2669 vdev = (struct vxgedev *)netdev_priv(dev);
2670 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2671 function_mode = vdev->config.device_hw_info.function_mode;
2672
2673 /* make sure the link is off by default every time the NIC is
2674 * initialized */
2675 netif_carrier_off(dev);
2676
2677 /* Check for another device already open with INTA */
2678 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2679 test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
2680 ret = -EPERM;
2681 goto out0;
2682 }
2683
2684 /* Open VPATHs */
2685 status = vxge_open_vpaths(vdev);
2686 if (status != VXGE_HW_OK) {
2687 vxge_debug_init(VXGE_ERR,
2688 "%s: fatal: Vpath open failed", vdev->ndev->name);
2689 ret = -EPERM;
2690 goto out0;
2691 }
2692
2693 vdev->mtu = dev->mtu;
2694
2695 status = vxge_add_isr(vdev);
2696 if (status != VXGE_HW_OK) {
2697 vxge_debug_init(VXGE_ERR,
2698 "%s: fatal: ISR add failed", dev->name);
2699 ret = -EPERM;
2700 goto out1;
2701 }
2702
2703
2704 if (vdev->config.intr_type != MSI_X) {
2705 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2706 vdev->config.napi_weight);
2707 napi_enable(&vdev->napi);
2708 for (i = 0; i < vdev->no_of_vpath; i++)
2709 vdev->vpaths[i].ring.napi_p = &vdev->napi;
2710 } else {
2711 for (i = 0; i < vdev->no_of_vpath; i++) {
2712 netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
2713 vxge_poll_msix, vdev->config.napi_weight);
2714 napi_enable(&vdev->vpaths[i].ring.napi);
2715 vdev->vpaths[i].ring.napi_p =
2716 &vdev->vpaths[i].ring.napi;
2717 }
2718 }
2719
2720 /* configure RTH */
2721 if (vdev->config.rth_steering) {
2722 status = vxge_rth_configure(vdev);
2723 if (status != VXGE_HW_OK) {
2724 vxge_debug_init(VXGE_ERR,
2725 "%s: fatal: RTH configuration failed",
2726 dev->name);
2727 ret = -EPERM;
2728 goto out2;
2729 }
2730 }
2731
2732 for (i = 0; i < vdev->no_of_vpath; i++) {
2733 /* set initial mtu before enabling the device */
2734 status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
2735 vdev->mtu);
2736 if (status != VXGE_HW_OK) {
2737 vxge_debug_init(VXGE_ERR,
2738 "%s: fatal: can not set new MTU", dev->name);
2739 ret = -EPERM;
2740 goto out2;
2741 }
2742 }
2743
2744 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2745 vxge_debug_init(vdev->level_trace,
2746 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2747 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2748
2749 /* Reprogram the DA table with populated mac addresses */
2750 for (i = 0; i < vdev->no_of_vpath; i++) {
2751 vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
2752 vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
2753 }
2754
2755 /* Enable vpaths to sniff all unicast/multicast traffic that is not
2756 * addressed to them. Promiscuous mode is allowed for the PF only.
2757 */
2758
2759 val64 = 0;
2760 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2761 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2762
2763 vxge_hw_mgmt_reg_write(vdev->devh,
2764 vxge_hw_mgmt_reg_type_mrpcim,
2765 0,
2766 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2767 rxmac_authorize_all_addr),
2768 val64);
2769
2770 vxge_hw_mgmt_reg_write(vdev->devh,
2771 vxge_hw_mgmt_reg_type_mrpcim,
2772 0,
2773 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2774 rxmac_authorize_all_vid),
2775 val64);
2776
2777 vxge_set_multicast(dev);
2778
2779 /* Enable broadcast and multicast for all vpaths */
2780 for (i = 0; i < vdev->no_of_vpath; i++) {
2781 status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
2782 if (status != VXGE_HW_OK)
2783 vxge_debug_init(VXGE_ERR,
2784 "%s : Can not enable bcast for vpath "
2785 "id %d", dev->name, i);
2786 if (vdev->config.addr_learn_en) {
2787 status =
2788 vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
2789 if (status != VXGE_HW_OK)
2790 vxge_debug_init(VXGE_ERR,
2791 "%s : Can not enable mcast for vpath "
2792 "id %d", dev->name, i);
2793 }
2794 }
2795
2796 vxge_hw_device_setpause_data(vdev->devh, 0,
2797 vdev->config.tx_pause_enable,
2798 vdev->config.rx_pause_enable);
2799
2800 if (vdev->vp_reset_timer.function == NULL)
2801 vxge_os_timer(vdev->vp_reset_timer,
2802 vxge_poll_vp_reset, vdev, (HZ/2));
2803
2804 if (vdev->vp_lockup_timer.function == NULL)
2805 vxge_os_timer(vdev->vp_lockup_timer,
2806 vxge_poll_vp_lockup, vdev, (HZ/2));
2807
2808 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2809
2810 smp_wmb();
2811
2812 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2813 netif_carrier_on(vdev->ndev);
2814 printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
2815 vdev->stats.link_up++;
2816 }
2817
2818 vxge_hw_device_intr_enable(vdev->devh);
2819
2820 smp_wmb();
2821
2822 for (i = 0; i < vdev->no_of_vpath; i++) {
2823 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
2824 smp_wmb();
2825 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
2826 }
2827
2828 vxge_start_all_tx_queue(vdev);
2829 goto out0;
2830
2831out2:
2832 vxge_rem_isr(vdev);
2833
2834 /* Disable napi */
2835 if (vdev->config.intr_type != MSI_X)
2836 napi_disable(&vdev->napi);
2837 else {
2838 for (i = 0; i < vdev->no_of_vpath; i++)
2839 napi_disable(&vdev->vpaths[i].ring.napi);
2840 }
2841
2842out1:
2843 vxge_close_vpaths(vdev, 0);
2844out0:
2845 vxge_debug_entryexit(VXGE_TRACE,
2846 "%s: %s:%d Exiting...",
2847 dev->name, __func__, __LINE__);
2848 return ret;
2849}
2850
2851 /* Loop through the mac address list and delete all the entries */
2852void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2853{
2854
2855 struct list_head *entry, *next;
2856 if (list_empty(&vpath->mac_addr_list))
2857 return;
2858
2859 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2860 list_del(entry);
2861 kfree((struct vxge_mac_addrs *)entry);
2862 }
2863}
2864
2865static void vxge_napi_del_all(struct vxgedev *vdev)
2866{
2867 int i;
2868 if (vdev->config.intr_type != MSI_X)
2869 netif_napi_del(&vdev->napi);
2870 else {
2871 for (i = 0; i < vdev->no_of_vpath; i++)
2872 netif_napi_del(&vdev->vpaths[i].ring.napi);
2873 }
2874 return;
2875}
2876
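/* Common close path shared by vxge_close() and the reset logic; do_io
 * selects whether hardware registers are actually touched.
 */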
2877int do_vxge_close(struct net_device *dev, int do_io)
2878{
2879 enum vxge_hw_status status;
2880 struct vxgedev *vdev;
2881 struct __vxge_hw_device *hldev;
2882 int i;
2883 u64 val64, vpath_vector;
2884 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2885 dev->name, __func__, __LINE__);
2886
2887 vdev = (struct vxgedev *)netdev_priv(dev);
2888 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2889
2890 if (unlikely(!is_vxge_card_up(vdev)))
2891 return 0;
2892
2893 /* If vxge_handle_crit_err task is executing,
2894 * wait till it completes. */
2895 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2896 msleep(50);
2897
2898 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2899 if (do_io) {
2900 /* Put the vpath back in normal mode */
2901 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2902 status = vxge_hw_mgmt_reg_read(vdev->devh,
2903 vxge_hw_mgmt_reg_type_mrpcim,
2904 0,
2905 (ulong)offsetof(
2906 struct vxge_hw_mrpcim_reg,
2907 rts_mgr_cbasin_cfg),
2908 &val64);
2909
2910 if (status == VXGE_HW_OK) {
2911 val64 &= ~vpath_vector;
2912 status = vxge_hw_mgmt_reg_write(vdev->devh,
2913 vxge_hw_mgmt_reg_type_mrpcim,
2914 0,
2915 (ulong)offsetof(
2916 struct vxge_hw_mrpcim_reg,
2917 rts_mgr_cbasin_cfg),
2918 val64);
2919 }
2920
2921 /* Remove function 0 from promiscuous mode */
2922 vxge_hw_mgmt_reg_write(vdev->devh,
2923 vxge_hw_mgmt_reg_type_mrpcim,
2924 0,
2925 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2926 rxmac_authorize_all_addr),
2927 0);
2928
2929 vxge_hw_mgmt_reg_write(vdev->devh,
2930 vxge_hw_mgmt_reg_type_mrpcim,
2931 0,
2932 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2933 rxmac_authorize_all_vid),
2934 0);
2935
2936 smp_wmb();
2937 }
2938 del_timer_sync(&vdev->vp_lockup_timer);
2939
2940 del_timer_sync(&vdev->vp_reset_timer);
2941
2942 /* Disable napi */
2943 if (vdev->config.intr_type != MSI_X)
2944 napi_disable(&vdev->napi);
2945 else {
2946 for (i = 0; i < vdev->no_of_vpath; i++)
2947 napi_disable(&vdev->vpaths[i].ring.napi);
2948 }
2949
2950 netif_carrier_off(vdev->ndev);
2951 printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
2952 vxge_stop_all_tx_queue(vdev);
2953
2954 /* Note that at this point xmit() is stopped by upper layer */
2955 if (do_io)
2956 vxge_hw_device_intr_disable(vdev->devh);
2957
2958 mdelay(1000);
2959
2960 vxge_rem_isr(vdev);
2961
2962 vxge_napi_del_all(vdev);
2963
2964 if (do_io)
2965 vxge_reset_all_vpaths(vdev);
2966
2967 vxge_close_vpaths(vdev, 0);
2968
2969 vxge_debug_entryexit(VXGE_TRACE,
2970 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2971
2972 clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
2973 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2974
2975 return 0;
2976}
2977
2978/**
2979 * vxge_close
2980 * @dev: device pointer.
2981 *
2982 * This is the stop entry point of the driver. It needs to undo exactly
2983 * whatever was done by the open entry point, thus it's usually referred to
2984 * as the close function. Among other things this function mainly stops the
2985 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2986 * Return value: '0' on success and an appropriate negative errno
2987 * value on failure.
2988 */
2989int
2990vxge_close(struct net_device *dev)
2991{
2992 do_vxge_close(dev, 1);
2993 return 0;
2994}
2995
2996/**
2997 * vxge_change_mtu
2998 * @dev: net device pointer.
2999 * @new_mtu: the new MTU size for the device.
3000 *
3001 * A driver entry point to change MTU size for the device. Before changing
3002 * the MTU the device must be stopped.
3003 */
3004static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3005{
3006 struct vxgedev *vdev = netdev_priv(dev);
3007
3008 vxge_debug_entryexit(vdev->level_trace,
3009 "%s:%d", __func__, __LINE__);
3010 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
3011 vxge_debug_init(vdev->level_err,
3012 "%s: mtu size is invalid", dev->name);
3013 return -EPERM;
3014 }
3015
3016 /* check if device is down already */
3017 if (unlikely(!is_vxge_card_up(vdev))) {
3018 /* just store new value, will use later on open() */
3019 dev->mtu = new_mtu;
3020 vxge_debug_init(vdev->level_err,
3021 "%s", "device is down on MTU change");
3022 return 0;
3023 }
3024
3025 vxge_debug_init(vdev->level_trace,
3026 "trying to apply new MTU %d", new_mtu);
3027
3028 if (vxge_close(dev))
3029 return -EIO;
3030
3031 dev->mtu = new_mtu;
3032 vdev->mtu = new_mtu;
3033
3034 if (vxge_open(dev))
3035 return -EIO;
3036
3037 vxge_debug_init(vdev->level_trace,
3038 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3039
3040 vxge_debug_entryexit(vdev->level_trace,
3041 "%s:%d Exiting...", __func__, __LINE__);
3042
3043 return 0;
3044}
3045
3046/**
3047 * vxge_get_stats
3048 * @dev: pointer to the device structure
3049 *
3050 * Updates the device statistics structure. This function updates the device
3051 * statistics structure in the net_device structure and returns a pointer
3052 * to the same.
3053 */
3054static struct net_device_stats *
3055vxge_get_stats(struct net_device *dev)
3056{
3057 struct vxgedev *vdev;
3058 struct net_device_stats *net_stats;
3059 int k;
3060
3061 vdev = netdev_priv(dev);
3062
3063 net_stats = &vdev->stats.net_stats;
3064
3065 memset(net_stats, 0, sizeof(struct net_device_stats));
3066
3067 for (k = 0; k < vdev->no_of_vpath; k++) {
3068 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
3069 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
3070 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
3071 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
3072 net_stats->rx_dropped +=
3073 vdev->vpaths[k].ring.stats.rx_dropped;
3074
3075 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
3076 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
3077 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
3078 }
3079
3080 return net_stats;
3081}
3082
3083/**
3084 * vxge_ioctl
3085 * @dev: Device pointer.
3086 * @ifr: An IOCTL specific structure, that can contain a pointer to
3087 * a proprietary structure used to pass information to the driver.
3088 * @cmd: This is used to distinguish between the different commands that
3089 * can be passed to the IOCTL functions.
3090 *
3091 * Entry point for the Ioctl.
3092 */
3093static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3094{
3095 return -EOPNOTSUPP;
3096}
3097
3098/**
3099 * vxge_tx_watchdog
3100 * @dev: pointer to net device structure
3101 *
3102 * Watchdog for transmit side.
3103 * This function is triggered if the Tx Queue is stopped
3104 * for a pre-defined amount of time when the Interface is still up.
3105 */
3106static void
3107vxge_tx_watchdog(struct net_device *dev)
3108{
3109 struct vxgedev *vdev;
3110
3111 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3112
3113 vdev = (struct vxgedev *)netdev_priv(dev);
3114
3115 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3116
3117 vxge_reset(vdev);
3118 vxge_debug_entryexit(VXGE_TRACE,
3119 "%s:%d Exiting...", __func__, __LINE__);
3120}
3121
3122/**
3123 * vxge_vlan_rx_register
3124 * @dev: net device pointer.
3125 * @grp: vlan group
3126 *
3127 * Vlan group registration
3128 */
3129static void
3130vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3131{
3132 struct vxgedev *vdev;
3133 struct vxge_vpath *vpath;
3134 int vp;
3135 u64 vid;
3136 enum vxge_hw_status status;
3137 int i;
3138
3139 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3140
3141 vdev = (struct vxgedev *)netdev_priv(dev);
3142
3143 vpath = &vdev->vpaths[0];
3144 if ((NULL == grp) && (vpath->is_open)) {
3145 /* Get the first vlan */
3146 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3147
3148 while (status == VXGE_HW_OK) {
3149
3150 /* Delete this vlan from the vid table */
3151 for (vp = 0; vp < vdev->no_of_vpath; vp++) {
3152 vpath = &vdev->vpaths[vp];
3153 if (!vpath->is_open)
3154 continue;
3155
3156 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3157 }
3158
3159 /* Get the next vlan to be deleted */
3160 vpath = &vdev->vpaths[0];
3161 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3162 }
3163 }
3164
3165 vdev->vlgrp = grp;
3166
3167 for (i = 0; i < vdev->no_of_vpath; i++) {
3168 if (vdev->vpaths[i].is_configured)
3169 vdev->vpaths[i].ring.vlgrp = grp;
3170 }
3171
3172 vxge_debug_entryexit(VXGE_TRACE,
3173 "%s:%d Exiting...", __func__, __LINE__);
3174}
3175
3176/**
3177 * vxge_vlan_rx_add_vid
3178 * @dev: net device pointer.
3179 * @vid: vid
3180 *
3181 * Add the vlan id to the devices vlan id table
3182 */
3183static void
3184vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3185{
3186 struct vxgedev *vdev;
3187 struct vxge_vpath *vpath;
3188 int vp_id;
3189
3190 vdev = (struct vxgedev *)netdev_priv(dev);
3191
3192 /* Add this vlan to the vid table of each open vpath */
3193 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3194 vpath = &vdev->vpaths[vp_id];
3195 if (!vpath->is_open)
3196 continue;
3197 vxge_hw_vpath_vid_add(vpath->handle, vid);
3198 }
3199}
3200
3201/**
3202 * vxge_vlan_rx_kill_vid
3203 * @dev: net device pointer.
3204 * @vid: vid
3205 *
3206 * Remove the vlan id from the device's vlan id table
3207 */
3208static void
3209vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3210{
3211 struct vxgedev *vdev;
3212 struct vxge_vpath *vpath;
3213 int vp_id;
3214
3215 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3216
3217 vdev = (struct vxgedev *)netdev_priv(dev);
3218
3219 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3220
3221 /* Delete this vlan from the vid table */
3222 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3223 vpath = &vdev->vpaths[vp_id];
3224 if (!vpath->is_open)
3225 continue;
3226 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3227 }
3228 vxge_debug_entryexit(VXGE_TRACE,
3229 "%s:%d Exiting...", __func__, __LINE__);
3230}
3231
3232static const struct net_device_ops vxge_netdev_ops = {
3233 .ndo_open = vxge_open,
3234 .ndo_stop = vxge_close,
3235 .ndo_get_stats = vxge_get_stats,
3236 .ndo_start_xmit = vxge_xmit,
3237 .ndo_validate_addr = eth_validate_addr,
3238 .ndo_set_multicast_list = vxge_set_multicast,
3239
3240 .ndo_do_ioctl = vxge_ioctl,
3241
3242 .ndo_set_mac_address = vxge_set_mac_addr,
3243 .ndo_change_mtu = vxge_change_mtu,
3244 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3245 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3246 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3247
3248 .ndo_tx_timeout = vxge_tx_watchdog,
3249#ifdef CONFIG_NET_POLL_CONTROLLER
3250 .ndo_poll_controller = vxge_netpoll,
3251#endif
3252};
3253
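/* Allocate and register the net_device for this adapter: wire up the
 * entry points and feature flags, allocate per-vpath state, and clear
 * the device-wide statistics.
 */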
3254int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3255 struct vxge_config *config,
3256 int high_dma, int no_of_vpath,
3257 struct vxgedev **vdev_out)
3258{
3259 struct net_device *ndev;
3260 enum vxge_hw_status status = VXGE_HW_OK;
3261 struct vxgedev *vdev;
3262 int i, ret = 0, no_of_queue = 1;
3263 u64 stat;
3264
3265 *vdev_out = NULL;
3266 if (config->tx_steering_type == TX_MULTIQ_STEERING)
3267 no_of_queue = no_of_vpath;
3268
3269 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3270 no_of_queue);
3271 if (ndev == NULL) {
3272 vxge_debug_init(
3273 vxge_hw_device_trace_level_get(hldev),
3274 "%s : device allocation failed", __func__);
3275 ret = -ENODEV;
3276 goto _out0;
3277 }
3278
3279 vxge_debug_entryexit(
3280 vxge_hw_device_trace_level_get(hldev),
3281 "%s: %s:%d Entering...",
3282 ndev->name, __func__, __LINE__);
3283
3284 vdev = netdev_priv(ndev);
3285 memset(vdev, 0, sizeof(struct vxgedev));
3286
3287 vdev->ndev = ndev;
3288 vdev->devh = hldev;
3289 vdev->pdev = hldev->pdev;
3290 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3291 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3292
3293 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3294
3295 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
3296 NETIF_F_HW_VLAN_FILTER;
3297 /* Driver entry points */
3298 ndev->irq = vdev->pdev->irq;
3299 ndev->base_addr = (unsigned long) hldev->bar0;
3300
3301 ndev->netdev_ops = &vxge_netdev_ops;
3302
3303 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3304
3305 initialize_ethtool_ops(ndev);
3306
3307 /* Allocate memory for vpath */
3308 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3309 no_of_vpath, GFP_KERNEL);
3310 if (!vdev->vpaths) {
3311 vxge_debug_init(VXGE_ERR,
3312 "%s: vpath memory allocation failed",
3313 vdev->ndev->name);
3314 ret = -ENODEV;
3315 goto _out1;
3316 }
3317
3318 ndev->features |= NETIF_F_SG;
3319
3320 ndev->features |= NETIF_F_HW_CSUM;
3321 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3322 "%s : checksuming enabled", __func__);
3323
3324 if (high_dma) {
3325 ndev->features |= NETIF_F_HIGHDMA;
3326 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3327 "%s : using High DMA", __func__);
3328 }
3329
3330 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
3331
3332 if (vdev->config.gro_enable)
3333 ndev->features |= NETIF_F_GRO;
3334
3335 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
3336 ndev->real_num_tx_queues = no_of_vpath;
3337
3338#ifdef NETIF_F_LLTX
3339 ndev->features |= NETIF_F_LLTX;
3340#endif
3341
3342 for (i = 0; i < no_of_vpath; i++)
3343 spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
3344
3345 if (register_netdev(ndev)) {
3346 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3347 "%s: %s : device registration failed!",
3348 ndev->name, __func__);
3349 ret = -ENODEV;
3350 goto _out2;
3351 }
3352
3353 /* Set the factory defined MAC address initially */
3354 ndev->addr_len = ETH_ALEN;
3355
3356 /* Make the link state off at this point; when the link change
3357 * interrupt comes, the state will be automatically changed to
3358 * the right state.
3359 */
3360 netif_carrier_off(ndev);
3361
3362 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3363 "%s: Ethernet device registered",
3364 ndev->name);
3365
3366 *vdev_out = vdev;
3367
3368 /* Resetting the Device stats */
3369 status = vxge_hw_mrpcim_stats_access(
3370 hldev,
3371 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3372 0,
3373 0,
3374 &stat);
3375
3376 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3377 vxge_debug_init(
3378 vxge_hw_device_trace_level_get(hldev),
3379 "%s: device stats clear returns"
3380 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3381
3382 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3383 "%s: %s:%d Exiting...",
3384 ndev->name, __func__, __LINE__);
3385
3386 return ret;
3387_out2:
3388 kfree(vdev->vpaths);
3389_out1:
3390 free_netdev(ndev);
3391_out0:
3392 return ret;
3393}
3394
3395/*
3396 * vxge_device_unregister
3397 *
3398 * This function will unregister and free network device
3399 */
3400void
3401vxge_device_unregister(struct __vxge_hw_device *hldev)
3402{
3403 struct vxgedev *vdev;
3404 struct net_device *dev;
3405 char buf[IFNAMSIZ];
3406#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3407 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3408 u32 level_trace;
3409#endif
3410
3411 dev = hldev->ndev;
3412 vdev = netdev_priv(dev);
3413#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3414 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3415 level_trace = vdev->level_trace;
3416#endif
3417 vxge_debug_entryexit(level_trace,
3418 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3419
3420 memcpy(buf, vdev->ndev->name, IFNAMSIZ);
3421
3422 /* in 2.6 will call stop() if device is up */
3423 unregister_netdev(dev);
3424
3425 flush_scheduled_work();
3426
3427 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
3428 vxge_debug_entryexit(level_trace,
3429 "%s: %s:%d Exiting...", buf, __func__, __LINE__);
3430}
3431
3432/*
3433 * vxge_callback_crit_err
3434 *
3435 * This function is called by the alarm handler in interrupt context.
3436 * Driver must analyze it based on the event type.
3437 */
3438static void
3439vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3440 enum vxge_hw_event type, u64 vp_id)
3441{
3442 struct net_device *dev = hldev->ndev;
3443 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
3444 int vpath_idx;
3445
3446 vxge_debug_entryexit(vdev->level_trace,
3447 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3448
3449 /* Note: This event type should be used for device wide
3450 * indications only - Serious errors, Slot freeze and critical errors
3451 */
3452 vdev->cric_err_event = type;
3453
3454 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
3455 if (vdev->vpaths[vpath_idx].device_id == vp_id)
3456 break;
3457
3458 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3459 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3460 vxge_debug_init(VXGE_ERR,
3461 "%s: Slot is frozen", vdev->ndev->name);
3462 } else if (type == VXGE_HW_EVENT_SERR) {
3463 vxge_debug_init(VXGE_ERR,
3464 "%s: Encountered Serious Error",
3465 vdev->ndev->name);
3466 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3467 vxge_debug_init(VXGE_ERR,
3468 "%s: Encountered Critical Error",
3469 vdev->ndev->name);
3470 }
3471
3472 if ((type == VXGE_HW_EVENT_SERR) ||
3473 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3474 if (unlikely(vdev->exec_mode))
3475 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3476 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3477 vxge_hw_device_mask_all(hldev);
3478 if (unlikely(vdev->exec_mode))
3479 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3480 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3481 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3482
3483 if (unlikely(vdev->exec_mode))
3484 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3485 else {
3486 /* check if this vpath is already set for reset */
3487 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3488
3489 /* disable interrupts for this vpath */
3490 vxge_vpath_intr_disable(vdev, vpath_idx);
3491
3492 /* stop the queue for this vpath */
3493 vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
3494 fifo);
3495 }
3496 }
3497 }
3498
3499 vxge_debug_entryexit(vdev->level_trace,
3500 "%s: %s:%d Exiting...",
3501 vdev->ndev->name, __func__, __LINE__);
3502}
3503
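/* Validate the bw_percentage module parameter: a zero entry or a sum
 * above 100 forces equal bandwidth across all vpaths; otherwise any
 * leftover bandwidth is split equally among the unspecified vpaths.
 */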
3504static void verify_bandwidth(void)
3505{
3506 int i, band_width, total = 0, equal_priority = 0;
3507
3508 /* 1. If user enters 0 for some fifo, give equal priority to all */
3509 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3510 if (bw_percentage[i] == 0) {
3511 equal_priority = 1;
3512 break;
3513 }
3514 }
3515
3516 if (!equal_priority) {
3517 /* 2. If sum exceeds 100, give equal priority to all */
3518 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3519 if (bw_percentage[i] == 0xFF)
3520 break;
3521
3522 total += bw_percentage[i];
3523 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3524 equal_priority = 1;
3525 break;
3526 }
3527 }
3528 }
3529
3530 if (!equal_priority) {
3531 /* Is all the bandwidth consumed? */
3532 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3533 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3534 /* Split rest of bw equally among next VPs */
3535 band_width =
3536 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3537 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3538 if (band_width < 2) /* min of 2% */
3539 equal_priority = 1;
3540 else {
3541 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3542 i++)
3543 bw_percentage[i] =
3544 band_width;
3545 }
3546 }
3547 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3548 equal_priority = 1;
3549 }
3550
3551 if (equal_priority) {
3552 vxge_debug_init(VXGE_ERR,
3553 "%s: Assigning equal bandwidth to all the vpaths",
3554 VXGE_DRIVER_NAME);
3555 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3556 VXGE_HW_MAX_VIRTUAL_PATHS;
3557 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3558 bw_percentage[i] = bw_percentage[0];
3559 }
3560
3561 return;
3562}
3563
3564/*
3565 * Vpath configuration
3566 */
3567static int __devinit vxge_config_vpaths(
3568 struct vxge_hw_device_config *device_config,
3569 u64 vpath_mask, struct vxge_config *config_param)
3570{
3571 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3572 u32 txdl_size, txdl_per_memblock;
3573
3574 temp = driver_config->vpath_per_dev;
3575 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3576 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3577 /* No more CPUs available. Return vpath count as zero. */
3578 if (driver_config->g_no_cpus == -1)
3579 return 0;
3580
3581 if (!driver_config->g_no_cpus)
3582 driver_config->g_no_cpus = num_online_cpus();
3583
3584 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3585 if (!driver_config->vpath_per_dev)
3586 driver_config->vpath_per_dev = 1;
3587
3588 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3589 if (!vxge_bVALn(vpath_mask, i, 1))
3590 continue;
3591 else
3592 default_no_vpath++;
3593 if (default_no_vpath < driver_config->vpath_per_dev)
3594 driver_config->vpath_per_dev = default_no_vpath;
3595
3596 driver_config->g_no_cpus = driver_config->g_no_cpus -
3597 (driver_config->vpath_per_dev * 2);
3598 if (driver_config->g_no_cpus <= 0)
3599 driver_config->g_no_cpus = -1;
3600 }
3601
3602 if (driver_config->vpath_per_dev == 1) {
3603 vxge_debug_ll_config(VXGE_TRACE,
3604 "%s: Disable tx and rx steering, "
3605 "as single vpath is configured", VXGE_DRIVER_NAME);
3606 config_param->rth_steering = NO_STEERING;
3607 config_param->tx_steering_type = NO_STEERING;
3608 device_config->rth_en = 0;
3609 }
3610
3611 /* configure bandwidth */
3612 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3613 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3614
3615 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3616 device_config->vp_config[i].vp_id = i;
3617 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3618 if (no_of_vpaths < driver_config->vpath_per_dev) {
3619 if (!vxge_bVALn(vpath_mask, i, 1)) {
3620 vxge_debug_ll_config(VXGE_TRACE,
3621 "%s: vpath: %d is not available",
3622 VXGE_DRIVER_NAME, i);
3623 continue;
3624 } else {
3625 vxge_debug_ll_config(VXGE_TRACE,
3626 "%s: vpath: %d available",
3627 VXGE_DRIVER_NAME, i);
3628 no_of_vpaths++;
3629 }
3630 } else {
3631 vxge_debug_ll_config(VXGE_TRACE,
3632 "%s: vpath: %d is not configured, "
3633 "max_config_vpath exceeded",
3634 VXGE_DRIVER_NAME, i);
3635 break;
3636 }
3637
3638 /* Configure Tx fifo's */
3639 device_config->vp_config[i].fifo.enable =
3640 VXGE_HW_FIFO_ENABLE;
3641 device_config->vp_config[i].fifo.max_frags =
3642 MAX_SKB_FRAGS;
3643 device_config->vp_config[i].fifo.memblock_size =
3644 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3645
3646 txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd);
3647 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3648
3649 device_config->vp_config[i].fifo.fifo_blocks =
3650 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3651
3652 device_config->vp_config[i].fifo.intr =
3653 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3654
3655 /* Configure tti properties */
3656 device_config->vp_config[i].tti.intr_enable =
3657 VXGE_HW_TIM_INTR_ENABLE;
3658
3659 device_config->vp_config[i].tti.btimer_val =
3660 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3661
3662 device_config->vp_config[i].tti.timer_ac_en =
3663 VXGE_HW_TIM_TIMER_AC_ENABLE;
3664
3665 /* For msi-x with napi (each vector
3666 has a handler of its own) -
3667 Set CI to OFF for all vpaths */
3668 device_config->vp_config[i].tti.timer_ci_en =
3669 VXGE_HW_TIM_TIMER_CI_DISABLE;
3670
3671 device_config->vp_config[i].tti.timer_ri_en =
3672 VXGE_HW_TIM_TIMER_RI_DISABLE;
3673
3674 device_config->vp_config[i].tti.util_sel =
3675 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3676
3677 device_config->vp_config[i].tti.ltimer_val =
3678 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3679
3680 device_config->vp_config[i].tti.rtimer_val =
3681 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3682
3683 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3684 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3685 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3686 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3687 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3688 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3689 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3690
3691 /* Configure Rx rings */
3692 device_config->vp_config[i].ring.enable =
3693 VXGE_HW_RING_ENABLE;
3694
3695 device_config->vp_config[i].ring.ring_blocks =
3696 VXGE_HW_DEF_RING_BLOCKS;
3697 device_config->vp_config[i].ring.buffer_mode =
3698 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3699 device_config->vp_config[i].ring.rxds_limit =
3700 VXGE_HW_DEF_RING_RXDS_LIMIT;
3701 device_config->vp_config[i].ring.scatter_mode =
3702 VXGE_HW_RING_SCATTER_MODE_A;
3703
3704 /* Configure rti properties */
3705 device_config->vp_config[i].rti.intr_enable =
3706 VXGE_HW_TIM_INTR_ENABLE;
3707
3708 device_config->vp_config[i].rti.btimer_val =
3709 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3710
3711 device_config->vp_config[i].rti.timer_ac_en =
3712 VXGE_HW_TIM_TIMER_AC_ENABLE;
3713
3714 device_config->vp_config[i].rti.timer_ci_en =
3715 VXGE_HW_TIM_TIMER_CI_DISABLE;
3716
3717 device_config->vp_config[i].rti.timer_ri_en =
3718 VXGE_HW_TIM_TIMER_RI_DISABLE;
3719
3720 device_config->vp_config[i].rti.util_sel =
3721 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3722
3723 device_config->vp_config[i].rti.urange_a =
3724 RTI_RX_URANGE_A;
3725 device_config->vp_config[i].rti.urange_b =
3726 RTI_RX_URANGE_B;
3727 device_config->vp_config[i].rti.urange_c =
3728 RTI_RX_URANGE_C;
3729 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3730 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3731 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3732 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3733
3734 device_config->vp_config[i].rti.rtimer_val =
3735 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3736
3737 device_config->vp_config[i].rti.ltimer_val =
3738 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3739
3740 device_config->vp_config[i].rpa_strip_vlan_tag =
3741 vlan_tag_strip;
3742 }
3743
3744 driver_config->vpath_per_dev = temp;
3745 return no_of_vpaths;
3746}
3747
3748 /* initialize device configurations */
3749static void __devinit vxge_device_config_init(
3750 struct vxge_hw_device_config *device_config,
3751 int *intr_type)
3752{
3753 /* Used for CQRQ/SRQ. */
3754 device_config->dma_blockpool_initial =
3755 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3756
3757 device_config->dma_blockpool_max =
3758 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3759
3760 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3761 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3762
3763#ifndef CONFIG_PCI_MSI
3764 vxge_debug_init(VXGE_ERR,
3765 "%s: This Kernel does not support "
3766 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3767 *intr_type = INTA;
3768#endif
3769
3770 /* Configure the interrupt mode: MSI-X or IRQ line. */
3771 switch (*intr_type) {
3772 case INTA:
3773 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3774 break;
3775
3776 case MSI_X:
3777 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
3778 break;
3779 }
3780 /* Timer period between device poll */
3781 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3782
3783 /* Configure mac based steering. */
3784 device_config->rts_mac_en = addr_learn_en;
3785
3786 /* Configure Vpaths */
3787 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3788
3789 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3790 __func__);
3791 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3792 device_config->dma_blockpool_initial);
3793 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3794 device_config->dma_blockpool_max);
3795 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3796 device_config->intr_mode);
3797 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3798 device_config->device_poll_millis);
3799 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3800 device_config->rts_mac_en);
3801 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3802 device_config->rth_en);
3803 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3804 device_config->rth_it_type);
3805}
3806
3807static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3808{
3809 int i;
3810
3811 vxge_debug_init(VXGE_TRACE,
3812 "%s: %d Vpath(s) opened",
3813 vdev->ndev->name, vdev->no_of_vpath);
3814
3815 switch (vdev->config.intr_type) {
3816 case INTA:
3817 vxge_debug_init(VXGE_TRACE,
3818 "%s: Interrupt type INTA", vdev->ndev->name);
3819 break;
3820
3821 case MSI_X:
3822 vxge_debug_init(VXGE_TRACE,
3823 "%s: Interrupt type MSI-X", vdev->ndev->name);
3824 break;
3825 }
3826
3827 if (vdev->config.rth_steering) {
3828 vxge_debug_init(VXGE_TRACE,
3829 "%s: RTH steering enabled for TCP_IPV4",
3830 vdev->ndev->name);
3831 } else {
3832 vxge_debug_init(VXGE_TRACE,
3833 "%s: RTH steering disabled", vdev->ndev->name);
3834 }
3835
3836 switch (vdev->config.tx_steering_type) {
3837 case NO_STEERING:
3838 vxge_debug_init(VXGE_TRACE,
3839 "%s: Tx steering disabled", vdev->ndev->name);
3840 break;
3841 case TX_PRIORITY_STEERING:
3842 vxge_debug_init(VXGE_TRACE,
3843 "%s: Unsupported tx steering option",
3844 vdev->ndev->name);
3845 vxge_debug_init(VXGE_TRACE,
3846 "%s: Tx steering disabled", vdev->ndev->name);
3847 vdev->config.tx_steering_type = 0;
3848 break;
3849 case TX_VLAN_STEERING:
3850 vxge_debug_init(VXGE_TRACE,
3851 "%s: Unsupported tx steering option",
3852 vdev->ndev->name);
3853 vxge_debug_init(VXGE_TRACE,
3854 "%s: Tx steering disabled", vdev->ndev->name);
3855 vdev->config.tx_steering_type = 0;
3856 break;
3857 case TX_MULTIQ_STEERING:
3858 vxge_debug_init(VXGE_TRACE,
3859 "%s: Tx multiqueue steering enabled",
3860 vdev->ndev->name);
3861 break;
3862 case TX_PORT_STEERING:
3863 vxge_debug_init(VXGE_TRACE,
3864 "%s: Tx port steering enabled",
3865 vdev->ndev->name);
3866 break;
3867 default:
3868 vxge_debug_init(VXGE_ERR,
3869 "%s: Unsupported tx steering type",
3870 vdev->ndev->name);
3871 vxge_debug_init(VXGE_TRACE,
3872 "%s: Tx steering disabled", vdev->ndev->name);
3873 vdev->config.tx_steering_type = 0;
3874 }
3875
3876 if (vdev->config.gro_enable) {
3877 vxge_debug_init(VXGE_ERR,
3878 "%s: Generic receive offload enabled",
3879 vdev->ndev->name);
3880 } else
3881 vxge_debug_init(VXGE_TRACE,
3882 "%s: Generic receive offload disabled",
3883 vdev->ndev->name);
3884
3885 if (vdev->config.addr_learn_en)
3886 vxge_debug_init(VXGE_TRACE,
3887 "%s: MAC Address learning enabled", vdev->ndev->name);
3888
3889 vxge_debug_init(VXGE_TRACE,
3890 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3891
3892 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3893 if (!vxge_bVALn(vpath_mask, i, 1))
3894 continue;
3895 vxge_debug_ll_config(VXGE_TRACE,
3896 "%s: MTU size - %d", vdev->ndev->name,
3897 ((struct __vxge_hw_device *)(vdev->devh))->
3898 config.vp_config[i].mtu);
3899 vxge_debug_init(VXGE_TRACE,
3900 "%s: VLAN tag stripping %s", vdev->ndev->name,
3901 ((struct __vxge_hw_device *)(vdev->devh))->
3902 config.vp_config[i].rpa_strip_vlan_tag
3903 ? "Enabled" : "Disabled");
3904 vxge_debug_init(VXGE_TRACE,
3905 "%s: Ring blocks : %d", vdev->ndev->name,
3906 ((struct __vxge_hw_device *)(vdev->devh))->
3907 config.vp_config[i].ring.ring_blocks);
3908 vxge_debug_init(VXGE_TRACE,
3909 "%s: Fifo blocks : %d", vdev->ndev->name,
3910 ((struct __vxge_hw_device *)(vdev->devh))->
3911 config.vp_config[i].fifo.fifo_blocks);
3912 vxge_debug_ll_config(VXGE_TRACE,
3913 "%s: Max frags : %d", vdev->ndev->name,
3914 ((struct __vxge_hw_device *)(vdev->devh))->
3915 config.vp_config[i].fifo.max_frags);
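/* All vpaths are programmed with the same per-path parameters, so
 * reporting the first configured one is sufficient. */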
3916 break;
3917 }
3918}
3919
3920#ifdef CONFIG_PM
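/*
 * Power management is not implemented; the stubs below return -ENOSYS so
 * that suspend/resume requests fail explicitly rather than silently.
 */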
3921/**
3922 * vxge_pm_suspend - vxge power management suspend entry point
3923 *
3924 */
3925static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
3926{
3927 return -ENOSYS;
3928}
3929/**
3930 * vxge_pm_resume - vxge power management resume entry point
3931 *
3932 */
3933static int vxge_pm_resume(struct pci_dev *pdev)
3934{
3935 return -ENOSYS;
3936}
3937
3938#endif
3939
3940/**
3941 * vxge_io_error_detected - called when PCI error is detected
3942 * @pdev: Pointer to PCI device
3943 * @state: The current pci connection state
3944 *
3945 * This function is called after a PCI bus error affecting
3946 * this device has been detected.
3947 */
3948static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3949 pci_channel_state_t state)
3950{
3951 struct __vxge_hw_device *hldev =
3952 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3953 struct net_device *netdev = hldev->ndev;
3954
3955 netif_device_detach(netdev);
3956
3957 if (netif_running(netdev)) {
3958 /* Bring down the card, while avoiding PCI I/O */
3959 do_vxge_close(netdev, 0);
3960 }
3961
3962 pci_disable_device(pdev);
3963
3964 return PCI_ERS_RESULT_NEED_RESET;
3965}
3966
3967/**
3968 * vxge_io_slot_reset - called after the pci bus has been reset.
3969 * @pdev: Pointer to PCI device
3970 *
3971 * Restart the card from scratch, as if from a cold-boot.
3972 * At this point, the card has experienced a hard reset,
3973 * followed by fixups by BIOS, and has its config space
3974 * set up identically to what it was at cold boot.
3975 */
3976static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3977{
3978 struct __vxge_hw_device *hldev =
3979 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3980 struct net_device *netdev = hldev->ndev;
3981
3982 struct vxgedev *vdev = netdev_priv(netdev);
3983
3984 if (pci_enable_device(pdev)) {
3985 printk(KERN_ERR "%s: "
3986 "Cannot re-enable device after reset\n",
3987 VXGE_DRIVER_NAME);
3988 return PCI_ERS_RESULT_DISCONNECT;
3989 }
3990
3991 pci_set_master(pdev);
3992 vxge_reset(vdev);
3993
3994 return PCI_ERS_RESULT_RECOVERED;
3995}
3996
3997/**
3998 * vxge_io_resume - called when traffic can start flowing again.
3999 * @pdev: Pointer to PCI device
4000 *
4001 * This callback is called when the error recovery driver tells
4002 * us that it is OK to resume normal operation.
4003 */
4004static void vxge_io_resume(struct pci_dev *pdev)
4005{
4006 struct __vxge_hw_device *hldev =
4007 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4008 struct net_device *netdev = hldev->ndev;
4009
4010 if (netif_running(netdev)) {
4011 if (vxge_open(netdev)) {
4012 printk(KERN_ERR "%s: "
4013 "Can't bring device back up after reset\n",
4014 VXGE_DRIVER_NAME);
4015 return;
4016 }
4017 }
4018
4019 netif_device_attach(netdev);
4020}
4021
4022/**
4023 * vxge_probe
4024 * @pdev : structure containing the PCI related information of the device.
4025 * @pre: the entry in vxge_id_table that matched this device.
4026 * Description:
4027 * This function is called when a new PCI device gets detected and initializes
4028 * it.
4029 * Return value:
4030 * returns 0 on success and negative on failure.
4031 *
4032 */
4033static int __devinit
4034vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4035{
4036 struct __vxge_hw_device *hldev;
4037 enum vxge_hw_status status;
4038 int ret;
4039 int high_dma = 0;
4040 u64 vpath_mask = 0;
4041 struct vxgedev *vdev;
4042 struct vxge_config ll_config;
4043 struct vxge_hw_device_config *device_config = NULL;
4044 struct vxge_hw_device_attr attr;
4045 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4046 u8 *macaddr;
4047 struct vxge_mac_addrs *entry;
4048 static int bus = -1, device = -1;
4049 u8 new_device = 0;
4050
4051 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4052 attr.pdev = pdev;
4053
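	/* vxge_probe() runs once per PCI function; a change in bus number or
	 * device slot marks the first function of a new adapter, for which
	 * the per-adapter bookkeeping below is reset. */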
4054 if (bus != pdev->bus->number)
4055 new_device = 1;
4056 if (device != PCI_SLOT(pdev->devfn))
4057 new_device = 1;
4058
4059 bus = pdev->bus->number;
4060 device = PCI_SLOT(pdev->devfn);
4061
4062 if (new_device) {
4063 if (driver_config->config_dev_cnt &&
4064 (driver_config->config_dev_cnt !=
4065 driver_config->total_dev_cnt))
4066 vxge_debug_init(VXGE_ERR,
4067 "%s: Configured %d of %d devices",
4068 VXGE_DRIVER_NAME,
4069 driver_config->config_dev_cnt,
4070 driver_config->total_dev_cnt);
4071 driver_config->config_dev_cnt = 0;
4072 driver_config->total_dev_cnt = 0;
4073 driver_config->g_no_cpus = 0;
4074 driver_config->vpath_per_dev = max_config_vpath;
4075 }
4076
4077 driver_config->total_dev_cnt++;
4078 if (++driver_config->config_dev_cnt > max_config_dev) {
4079 ret = 0;
4080 goto _exit0;
4081 }
4082
4083 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4084 GFP_KERNEL);
4085 if (!device_config) {
4086 ret = -ENOMEM;
4087 vxge_debug_init(VXGE_ERR,
4088 "device_config : malloc failed %s %d",
4089 __FILE__, __LINE__);
4090 goto _exit0;
4091 }
4092
4093 memset(&ll_config, 0, sizeof(struct vxge_config));
4094 ll_config.tx_steering_type = TX_MULTIQ_STEERING;
4095 ll_config.intr_type = MSI_X;
4096 ll_config.napi_weight = NEW_NAPI_WEIGHT;
4097 ll_config.rth_steering = RTH_STEERING;
4098
4099 /* get the default configuration parameters */
4100 vxge_hw_device_config_default_get(device_config);
4101
4102 /* initialize configuration parameters */
4103 vxge_device_config_init(device_config, &ll_config.intr_type);
4104
4105 ret = pci_enable_device(pdev);
4106 if (ret) {
4107 vxge_debug_init(VXGE_ERR,
4108 "%s : can not enable PCI device", __func__);
4109 goto _exit0;
4110 }
4111
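	/* Negotiate the DMA mask: prefer 64-bit streaming and coherent masks,
	 * fall back to 32-bit; high_dma is later handed to
	 * vxge_device_register() to describe the addressing capability. */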
4112 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
4113 vxge_debug_ll_config(VXGE_TRACE,
4114 "%s : using 64bit DMA", __func__);
4115
4116 high_dma = 1;
4117
4118 if (pci_set_consistent_dma_mask(pdev,
4119 0xffffffffffffffffULL)) {
4120 vxge_debug_init(VXGE_ERR,
4121 "%s : unable to obtain 64bit DMA for "
4122 "consistent allocations", __func__);
4123 ret = -ENOMEM;
4124 goto _exit1;
4125 }
4126 } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
4127 vxge_debug_ll_config(VXGE_TRACE,
4128 "%s : using 32bit DMA", __func__);
4129 } else {
4130 ret = -ENOMEM;
4131 goto _exit1;
4132 }
4133
4134 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
4135 vxge_debug_init(VXGE_ERR,
4136 "%s : request regions failed", __func__);
4137 ret = -ENODEV;
4138 goto _exit1;
4139 }
4140
4141 pci_set_master(pdev);
4142
4143 attr.bar0 = pci_ioremap_bar(pdev, 0);
4144 if (!attr.bar0) {
4145 vxge_debug_init(VXGE_ERR,
4146 "%s : cannot remap io memory bar0", __func__);
4147 ret = -ENODEV;
4148 goto _exit2;
4149 }
4150 vxge_debug_ll_config(VXGE_TRACE,
4151 "pci ioremap bar0: %p:0x%llx",
4152 attr.bar0,
4153 (unsigned long long)pci_resource_start(pdev, 0));
4154
4155 status = vxge_hw_device_hw_info_get(attr.bar0,
4156 &ll_config.device_hw_info);
4157 if (status != VXGE_HW_OK) {
4158 vxge_debug_init(VXGE_ERR,
4159 "%s: Reading of hardware info failed."
4160 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4161 ret = -EINVAL;
4162 goto _exit3;
4163 }
4164
4165 if (ll_config.device_hw_info.fw_version.major !=
4166 VXGE_DRIVER_VERSION_MAJOR) {
4167 vxge_debug_init(VXGE_ERR,
4168 "FW Ver.(maj): %d not driver's expected version: %d",
4169 ll_config.device_hw_info.fw_version.major,
4170 VXGE_DRIVER_VERSION_MAJOR);
4171 ret = -EINVAL;
4172 goto _exit3;
4173 }
4174
4175 vpath_mask = ll_config.device_hw_info.vpath_mask;
4176 if (vpath_mask == 0) {
4177 vxge_debug_ll_config(VXGE_TRACE,
4178 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4179 ret = -EINVAL;
4180 goto _exit3;
4181 }
4182
4183 vxge_debug_ll_config(VXGE_TRACE,
4184 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4185 (unsigned long long)vpath_mask);
4186
4187 /* Check how many vpaths are available */
4188 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4189 if (!((vpath_mask) & vxge_mBIT(i)))
4190 continue;
4191 max_vpath_supported++;
4192 }
4193
4194 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4195 if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
4196 ll_config.device_hw_info.function_mode) &&
4197 (max_config_dev > 1) && (pdev->is_physfn)) {
4198 ret = pci_enable_sriov(pdev, max_config_dev - 1);
4199 if (ret)
4200 vxge_debug_ll_config(VXGE_ERR,
4201 "Failed to enable SRIOV: %d \n", ret);
4202 }
4203
4204 /*
4205 * Configure vpaths and get driver configured number of vpaths
4206 * which is less than or equal to the maximum vpaths per function.
4207 */
4208 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
4209 if (!no_of_vpath) {
4210 vxge_debug_ll_config(VXGE_ERR,
4211 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4212 ret = 0;
4213 goto _exit3;
4214 }
4215
4216 /* Setting driver callbacks */
4217 attr.uld_callbacks.link_up = vxge_callback_link_up;
4218 attr.uld_callbacks.link_down = vxge_callback_link_down;
4219 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4220
4221 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4222 if (status != VXGE_HW_OK) {
4223 vxge_debug_init(VXGE_ERR,
4224 "Failed to initialize device (%d)", status);
4225 ret = -EINVAL;
4226 goto _exit3;
4227 }
4228
4229 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4230
4231 /* set private device info */
4232 pci_set_drvdata(pdev, hldev);
4233
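	/* Remaining link-layer defaults: GRO aggregation, MAC address
	 * learning, and RTH hashing (Jenkins hash over TCP/IPv4 only; all
	 * other hash types disabled), plus Tx/Rx pause control. */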
4234 ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4235 ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4236 ll_config.addr_learn_en = addr_learn_en;
4237 ll_config.rth_algorithm = RTH_ALG_JENKINS;
4238 ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
4239 ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
4240 ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4241 ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4242 ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4243 ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4244 ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
4245 ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4246 ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4247
4248 if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
4249 &vdev)) {
4250 ret = -EINVAL;
4251 goto _exit4;
4252 }
4253
4254 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4255 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4256 vxge_hw_device_trace_level_get(hldev));
4257
4258 /* set private HW device info */
4259 hldev->ndev = vdev->ndev;
4260 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4261 vdev->bar0 = attr.bar0;
4262 vdev->max_vpath_supported = max_vpath_supported;
4263 vdev->no_of_vpath = no_of_vpath;
4264
4265 /* Initialize the configured virtual paths */
4266 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4267 if (!vxge_bVALn(vpath_mask, i, 1))
4268 continue;
4269 if (j >= vdev->no_of_vpath)
4270 break;
4271
4272 vdev->vpaths[j].is_configured = 1;
4273 vdev->vpaths[j].device_id = i;
4274 vdev->vpaths[j].fifo.driver_id = j;
4275 vdev->vpaths[j].ring.driver_id = j;
4276 vdev->vpaths[j].vdev = vdev;
4277 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4278 memcpy((u8 *)vdev->vpaths[j].macaddr,
4279 (u8 *)ll_config.device_hw_info.mac_addrs[i],
4280 ETH_ALEN);
4281
4282 /* Initialize the mac address list header */
4283 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4284
4285 vdev->vpaths[j].mac_addr_cnt = 0;
4286 vdev->vpaths[j].mcast_addr_cnt = 0;
4287 j++;
4288 }
4289 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4290 vdev->max_config_port = max_config_port;
4291
4292 vdev->vlan_tag_strip = vlan_tag_strip;
4293
4294 /* map the hashing selector table to the configured vpaths */
4295 for (i = 0; i < vdev->no_of_vpath; i++)
4296 vdev->vpath_selector[i] = vpath_selector[i];
4297
4298 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4299
4300 ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4301 ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4302 ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4303
4304 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4305 vdev->ndev->name, ll_config.device_hw_info.serial_number);
4306
4307 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4308 vdev->ndev->name, ll_config.device_hw_info.part_number);
4309
4310 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4311 vdev->ndev->name, ll_config.device_hw_info.product_desc);
4312
4313 vxge_debug_init(VXGE_TRACE,
4314 "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
4315 vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
4316 macaddr[3], macaddr[4], macaddr[5]);
4317
4318 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4319 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4320
4321 vxge_debug_init(VXGE_TRACE,
4322 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4323 ll_config.device_hw_info.fw_version.version,
4324 ll_config.device_hw_info.fw_date.date);
4325
4326 vxge_print_parm(vdev, vpath_mask);
4327
4328 /* Store the fw version for the ethtool option */
4329 strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
4330 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4331 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4332
4333 /* Copy the station mac address to the list */
4334 for (i = 0; i < vdev->no_of_vpath; i++) {
4335 entry = kzalloc(sizeof(struct vxge_mac_addrs),
4336 GFP_KERNEL);
4337 
4338 if (!entry) {
4339 vxge_debug_init(VXGE_ERR,
4340 "%s: mac_addr_list : memory allocation failed",
4341 vdev->ndev->name);
4342 ret = -ENOMEM;
4343 goto _exit5;
4344 }
4345 macaddr = (u8 *)&entry->macaddr;
4346 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4347 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4348 vdev->vpaths[i].mac_addr_cnt = 1;
4349 }
4350
4351 kfree(device_config);
4352 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4353 vdev->ndev->name, __func__, __LINE__);
4354
4355 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4356 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4357 vxge_hw_device_trace_level_get(hldev));
4358
4359 return 0;
4360
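/*
 * Error unwinding: each label below releases what was acquired after the
 * previous one, so a failure at any stage falls through the remaining
 * teardown steps in reverse order of acquisition.
 */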
4361_exit5:
4362 for (i = 0; i < vdev->no_of_vpath; i++)
4363 vxge_free_mac_add_list(&vdev->vpaths[i]);
4364
4365 vxge_device_unregister(hldev);
4366_exit4:
4367 pci_disable_sriov(pdev);
4368 vxge_hw_device_terminate(hldev);
4369_exit3:
4370 iounmap(attr.bar0);
4371_exit2:
4372 pci_release_regions(pdev);
4373_exit1:
4374 pci_disable_device(pdev);
4375_exit0:
4376 kfree(device_config);
4377 driver_config->config_dev_cnt--;
4378 pci_set_drvdata(pdev, NULL);
4379 return ret;
4380}
4381
4382/**
4383 * vxge_remove - Free the PCI device
4384 * @pdev: structure containing the PCI related information of the device.
4385 * Description: This function is called by the PCI subsystem to release a
4386 * PCI device and free up all resources held by the device.
4387 */
4388static void __devexit
4389vxge_remove(struct pci_dev *pdev)
4390{
4391 struct __vxge_hw_device *hldev;
4392 struct vxgedev *vdev = NULL;
4393 struct net_device *dev;
4394 int i = 0;
4395#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4396 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4397 u32 level_trace;
4398#endif
4399
4400 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4401
4402 if (hldev == NULL)
4403 return;
4404 dev = hldev->ndev;
4405 vdev = netdev_priv(dev);
4406
4407#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4408 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4409 level_trace = vdev->level_trace;
4410#endif
4411 vxge_debug_entryexit(level_trace,
4412 "%s:%d", __func__, __LINE__);
4413
4414 vxge_debug_init(level_trace,
4415 "%s : removing PCI device...", __func__);
4416 vxge_device_unregister(hldev);
4417
4418 for (i = 0; i < vdev->no_of_vpath; i++) {
4419 vxge_free_mac_add_list(&vdev->vpaths[i]);
4420 vdev->vpaths[i].mcast_addr_cnt = 0;
4421 vdev->vpaths[i].mac_addr_cnt = 0;
4422 }
4423
4424 kfree(vdev->vpaths);
4425
4426 iounmap(vdev->bar0);
4427
4428 pci_disable_sriov(pdev);
4429
4430 /* we are safe to free it now */
4431 free_netdev(dev);
4432
4433 vxge_debug_init(level_trace,
4434 "%s:%d Device unregistered", __func__, __LINE__);
4435
4436 vxge_hw_device_terminate(hldev);
4437
4438 pci_disable_device(pdev);
4439 pci_release_regions(pdev);
4440 pci_set_drvdata(pdev, NULL);
4441 vxge_debug_entryexit(level_trace,
4442 "%s:%d Exiting...", __func__, __LINE__);
4443}
4444
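/*
 * PCI error-recovery (AER) hooks: the core calls error_detected() when a
 * bus error is seen (detach and quiesce), slot_reset() once the link has
 * been reset (re-enable and reset the adapter), and resume() when traffic
 * may flow again.
 */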
4445static struct pci_error_handlers vxge_err_handler = {
4446 .error_detected = vxge_io_error_detected,
4447 .slot_reset = vxge_io_slot_reset,
4448 .resume = vxge_io_resume,
4449};
4450
4451static struct pci_driver vxge_driver = {
4452 .name = VXGE_DRIVER_NAME,
4453 .id_table = vxge_id_table,
4454 .probe = vxge_probe,
4455 .remove = __devexit_p(vxge_remove),
4456#ifdef CONFIG_PM
4457 .suspend = vxge_pm_suspend,
4458 .resume = vxge_pm_resume,
4459#endif
4460 .err_handler = &vxge_err_handler,
4461};
4462
4463static int __init
4464vxge_starter(void)
4465{
4466 int ret = 0;
4467 char version[32];
4468 snprintf(version, sizeof(version), "%s", DRV_VERSION);
4469
4470 printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n",
4471 VXGE_DRIVER_NAME);
4472 printk(KERN_INFO "%s: Driver version: %s\n",
4473 VXGE_DRIVER_NAME, version);
4474
4475 verify_bandwidth();
4476
4477 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4478 if (!driver_config)
4479 return -ENOMEM;
4480
4481 ret = pci_register_driver(&vxge_driver);
4482
4483 if (driver_config->config_dev_cnt &&
4484 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4485 vxge_debug_init(VXGE_ERR,
4486 "%s: Configured %d of %d devices",
4487 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4488 driver_config->total_dev_cnt);
4489
4490 if (ret)
4491 kfree(driver_config);
4492
4493 return ret;
4494}
4495
4496static void __exit
4497vxge_closer(void)
4498{
4499 pci_unregister_driver(&vxge_driver);
4500 kfree(driver_config);
4501}
4502module_init(vxge_starter);
4503module_exit(vxge_closer);