/* drivers/net/vxge/vxge-main.c (net-next-2.6, commit 703da5a1: "Neterion: New driver: Main entry points") */
/******************************************************************************
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
* Drivers based on or derived from this code fall under the GPL and must
* retain the authorship, copyright and license notice. This file is not
* a complete program and may only be used when the entire operating
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
*
* vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
*              Virtualized Server Adapter.
* Copyright(c) 2002-2009 Neterion Inc.
*
* The module loadable parameters that are supported by the driver and a brief
* explanation of all the variables:
* vlan_tag_strip:
*	Strip VLAN Tag enable/disable. Instructs the device to remove
*	the VLAN tag from all received tagged frames that are not
*	replicated at the internal L2 switch.
*		0 - Do not strip the VLAN tag.
*		1 - Strip the VLAN tag.
*
* addr_learn_en:
*	Enable learning the mac address of the guest OS interface in
*	a virtualization environment.
*		0 - DISABLE
*		1 - ENABLE
*
* max_config_port:
*	Maximum number of ports to be supported.
*	MIN - 1 and MAX - 2
*
* max_config_vpath:
*	Maximum number of VPATHs to be configured for each device function.
*	MIN - 1 and MAX - 17
*
* max_config_dev:
*	Maximum number of device functions to be enabled.
*	MIN - 1 and MAX - 17
*
******************************************************************************/

#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static struct pci_device_id vxge_id_table[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
		{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);
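
/*
 * A minimal usage sketch (not part of the driver): the parameters above
 * are given at module load time. The values below are hypothetical
 * examples, not recommendations:
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_vpath=4 \
 *		bw_percentage=25,25,25,25
 */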

static struct vxge_drv_config *driver_config;

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	unsigned long flags = 0;
	struct sk_buff *skb_ptr = NULL;
	struct sk_buff **temp, *head, *skb;

	if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
		vxge_hw_vpath_poll_tx(fifo->handle, (void **)&skb_ptr);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
	}
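	/*
	 * vxge_hw_vpath_poll_tx() hands back the completed skbs as a
	 * singly linked list threaded through each skb's cb[] area (the
	 * links are built in vxge_xmit_compl() below), so the walk here
	 * needs no extra allocation.
	 */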
	/* free SKBs */
	head = skb_ptr;
	while (head) {
		skb = head;
		temp = (struct sk_buff **)&skb->cb;
		head = *temp;
		*temp = NULL;
		dev_kfree_skb_irq(skb);
	}
}

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * MultiQ manipulation helper functions
 */
void vxge_stop_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(dev);
}

void vxge_stop_tx_queue(struct vxge_fifo *fifo)
{
	struct net_device *dev = fifo->ndev;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
		txq = netdev_get_tx_queue(dev, fifo->driver_id);
	else {
		txq = netdev_get_tx_queue(dev, 0);
		fifo->queue_state = VPATH_QUEUE_STOP;
	}

	netif_tx_stop_queue(txq);
}

void vxge_start_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_start_all_queues(dev);
}

static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_wake_all_queues(dev);
}

void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
{
	struct net_device *dev = fifo->ndev;
	int vpath_no = fifo->driver_id;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
		txq = netdev_get_tx_queue(dev, vpath_no);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	} else {
		txq = netdev_get_tx_queue(dev, 0);
		if (fifo->queue_state == VPATH_QUEUE_STOP)
			if (netif_tx_queue_stopped(txq)) {
				fifo->queue_state = VPATH_QUEUE_START;
				netif_tx_wake_queue(txq);
			}
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	vxge_wake_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	vxge_stop_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff*
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	dma_addr = pci_map_single(ring->pdev, rx_priv->skb->data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

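	/*
	 * Prefer GRO when it is enabled; in either mode, a frame whose
	 * VLAN tag was stripped by the adapter is handed to the VLAN
	 * acceleration entry points so the tag is restored for the stack.
	 */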
	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(&ring->napi, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(&ring->napi, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

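/*
 * Posts are batched: most descriptors are queued with
 * vxge_hw_ring_rxd_post_post(), and once every VXGE_HW_RXSYNC_FREQ_CNT
 * descriptors the previously saved one is posted with a write memory
 * barrier so the device sees a consistent batch.
 */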
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh, 0);

	do {
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {

			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s:%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (an unparseable
				 * packet, such as an unknown IPv6 header),
				 * drop it!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

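		/*
		 * Above the copy threshold, hand the filled buffer up and
		 * allocate/map a replacement for the descriptor; at or
		 * below it, copy the frame into a fresh small skb and
		 * recycle the original buffer, avoiding a DMA remap for
		 * small frames.
		 */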
		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {

			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {

				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb_up);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	dev->last_rx = jiffies;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...",
		__func__, __LINE__);
	return VXGE_HW_OK;
}

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA completion of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already been DMA'ed into the
 * NIC's internal memory.
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		void **skb_ptr)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, *head = NULL;
	struct sk_buff **temp;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

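		/*
		 * Chain the completed skb onto the list returned to the
		 * caller: the first bytes of skb->cb hold the "next"
		 * pointer, so the list costs no extra allocation.
		 */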
		temp = (struct sk_buff **)&skb->cb;
		*temp = head;
		head = skb;

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
			&dtr, &t_code) == VXGE_HW_OK);

	vxge_wake_tx_queue(fifo, skb);

	if (skb_ptr)
		*skb_ptr = (void *) head;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
			     int *do_lock)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;

			if (ip->protocol == IPPROTO_UDP) {
#ifdef NETIF_F_LLTX
				*do_lock = 0;
#endif
			}
		}
	}
	return counter;
}
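
/*
 * Example: with 5 vpaths configured, vpath_selector[4] == 7, so a frame
 * with source port 34567 and destination port 80 yields
 * (34567 + 80) & 7 == 7, which is then clamped to queue_len - 1 == 4.
 */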

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}

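/*
 * vxge_learn_mac() resolves a source MAC address to a vpath in three
 * steps: check the per-vpath software lists first, then try to add the
 * address to a vpath whose DA table still has room, and finally fall
 * back to configuring vpath 0 as a "catch-basin" that receives all
 * otherwise unmatched frames.
 */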
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. The Neterion NIC
 * supports certain protocol assist features on the Tx side, namely CSO,
 * S/G and LSO.
 * NOTE: when the device can't queue the pkt, just the trans_start variable
 * will not be updated.
 */
static int
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	u32 max_mss = 0x0;
	int offload_type;
	unsigned long flags = 0;
	int vpath_no = 0;
	int do_spin_tx_lock = 1;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (do_spin_tx_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
		if (netif_subqueue_stopped(dev, skb)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		vxge_stop_tx_queue(fifo);
		fifo->stats.pci_map_fail++;
		goto _exit2;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;
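	/*
	 * dma_buffers[] records every mapping in descriptor order (index
	 * 0 is the linear header, then one entry per fragment) so that
	 * vxge_xmit_compl() and vxge_tx_term() can unmap them later.
	 */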

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
			first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* skip a 0 length fragment, but still advance the
		 * fragment pointer so later fragments are not lost */
		if (!frag->size) {
			frag += 1;
			continue;
		}

		dma_pointer =
			(u64)pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit0;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
			dev->name, __func__, __LINE__, i,
			(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
			frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {

		int mss = vxge_tcp_mss(skb);
		if (mss) {
			max_mss = dev->mtu + ETH_HLEN -
				VXGE_HW_TCPIP_HEADER_MAX_SIZE;
			if (mss > max_mss)
				mss = max_mss;
			vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
			VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	VXGE_COMPLETE_VPATH_TX(fifo);
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit0:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);

_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit2:
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	VXGE_COMPLETE_VPATH_TX(fifo);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine if the multicast addresses must be enabled, if promiscuous
 * mode is to be disabled, etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *mclist;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_disable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 0;
		}
	}

	if (status != VXGE_HW_OK)
		vxge_debug_init(VXGE_ERR,
			"failed to %s multicast, status %d",
			dev->flags & IFF_ALLMULTI ?
			"enable" : "disable", status);

	if (!vdev->config.addr_learn_en) {
		if (dev->flags & IFF_PROMISC) {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_enable(
						vdev->vpaths[i].handle);
			}
		} else {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_disable(
						vdev->vpaths[i].handle);
			}
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && dev->mc_count) {

		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((dev->mc_count +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			if (!list_empty(list_head))
				mac_entry = (struct vxge_mac_addrs *)
					list_first_entry(list_head,
						struct vxge_mac_addrs,
						item);

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			i++, mclist = mclist->next) {

			memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0])
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id, alarm_msix_id;
	int tim_msix_id[4] = {[0 ...3] = 0};
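	/*
	 * In MSI-X mode each vpath owns VXGE_HW_VPATH_MSIX_ACTIVE
	 * consecutive vectors for its Tx/Rx TIM events; the alarm events
	 * of all vpaths share one vector near the end of the block
	 * (VXGE_HW_VPATH_MSIX_ACTIVE * no_of_vpath - 2).
	 */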

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		alarm_msix_id =
			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

		tim_msix_id[0] = msix_id;
		tim_msix_id[1] = msix_id + 1;
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
	}
}

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}

/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vdev->vpaths[vp_id].handle) {
		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
				== VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[vp_id].handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);

	return ret;
}

static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			vxge_stop_all_tx_queue(vdev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;

		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		vxge_stop_all_tx_queue(vdev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		vxge_wake_all_tx_queue(vdev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
	return ret;
}

/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
int vxge_reset(struct vxgedev *vdev)
{
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
	return 0;
}

/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into the picture only if the receive side is being
 * handled through polling (called NAPI in Linux). It mostly does what the
 * normal Rx interrupt handler does in terms of descriptor and packet
 * processing, but not in an interrupt context. Also, it will process at
 * most a specified number of packets in one iteration. This value is
 * passed down by the kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	return ring->pkts_processed;
}

static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by the upper layer to check for events on
 * the interface in situations where interrupts are disabled. It is used
 * for specific in-kernel networking tasks, such as remote consoles and
 * kernel debugging over the network (for example, netdump in Red Hat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);
	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
	return;
}
#endif

/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}
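	/*
	 * Example: rth_bkt_sz == 2 with three vpaths gives four buckets,
	 * itable = {0, 1, 2, 3} and mtable = {0, 1, 2, 0}.
	 */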

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}

int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}
/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}

/* Search for a mac address in the DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
	struct macInfo *mac)
1929{
1930 enum vxge_hw_status status = VXGE_HW_OK;
1931 unsigned char macmask[ETH_ALEN];
1932 unsigned char macaddr[ETH_ALEN];
1933
1934 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1935 macaddr, macmask);
1936 if (status != VXGE_HW_OK) {
1937 vxge_debug_init(VXGE_ERR,
1938 "DA config list entry failed for vpath:%d",
1939 vpath->device_id);
1940 return status;
1941 }
1942
1943 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1944
1945 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1946 macaddr, macmask);
1947 if (status != VXGE_HW_OK)
1948 break;
1949 }
1950
1951 return status;
1952}
1953
1954/* Store all vlan ids from the list to the vid table */
1955enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1956{
1957 enum vxge_hw_status status = VXGE_HW_OK;
1958 struct vxgedev *vdev = vpath->vdev;
1959 u16 vid;
1960
1961 if (vdev->vlgrp && vpath->is_open) {
1962
1963 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1964 if (!vlan_group_get_device(vdev->vlgrp, vid))
1965 continue;
1966 /* Add this vlan to the vid table */
1967 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1968 }
1969 }
1970
1971 return status;
1972}
1973
1974/* Store all mac addresses from the list to the DA table */
1975enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1976{
1977 enum vxge_hw_status status = VXGE_HW_OK;
1978 struct macInfo mac_info;
1979 u8 *mac_address = NULL;
1980 struct list_head *entry, *next;
1981
1982 memset(&mac_info, 0, sizeof(struct macInfo));
1983
1984 if (vpath->is_open) {
1985
1986 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1987 mac_address =
1988 (u8 *)&
1989 ((struct vxge_mac_addrs *)entry)->macaddr;
1990 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1991 ((struct vxge_mac_addrs *)entry)->state =
1992 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1993 /* does this mac address already exist in da table? */
1994 status = vxge_search_mac_addr_in_da_table(vpath,
1995 &mac_info);
1996 if (status != VXGE_HW_OK) {
1997 /* Add this mac address to the DA table */
1998 status = vxge_hw_vpath_mac_addr_add(
1999 vpath->handle, mac_info.macaddr,
2000 mac_info.macmask,
2001 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
2002 if (status != VXGE_HW_OK) {
2003 vxge_debug_init(VXGE_ERR,
2004 "DA add entry failed for vpath:%d",
2005 vpath->device_id);
2006 ((struct vxge_mac_addrs *)entry)->state
2007 = VXGE_LL_MAC_ADDR_IN_LIST;
2008 }
2009 }
2010 }
2011 }
2012
2013 return status;
2014}
2015
2016/* reset vpaths */
2017enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
2018{
2019 int i;
2020 enum vxge_hw_status status = VXGE_HW_OK;
2021
2022 for (i = 0; i < vdev->no_of_vpath; i++)
2023 if (vdev->vpaths[i].handle) {
2024 if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
2025 == VXGE_HW_OK) {
2026 if (is_vxge_card_up(vdev) &&
2027 vxge_hw_vpath_recover_from_reset(
2028 vdev->vpaths[i].handle)
2029 != VXGE_HW_OK) {
2030 vxge_debug_init(VXGE_ERR,
2031 "vxge_hw_vpath_recover_"
2032 "from_reset failed for vpath: "
2033 "%d", i);
2034 return VXGE_HW_FAIL;
2035 }
2036 } else {
2037 vxge_debug_init(VXGE_ERR,
2038 "vxge_hw_vpath_reset failed for "
2039 "vpath:%d", i);
2040 return VXGE_HW_FAIL;
2041 }
2042 }
2043 return status;
2044}
2045
2046/* close vpaths */
2047void vxge_close_vpaths(struct vxgedev *vdev, int index)
2048{
2049 int i;
2050 for (i = index; i < vdev->no_of_vpath; i++) {
2051 if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
2052 vxge_hw_vpath_close(vdev->vpaths[i].handle);
2053 vdev->stats.vpaths_open--;
2054 }
2055 vdev->vpaths[i].is_open = 0;
2056 vdev->vpaths[i].handle = NULL;
2057 }
2058}
2059
2060/* open vpaths */
2061int vxge_open_vpaths(struct vxgedev *vdev)
2062{
2063 enum vxge_hw_status status;
2064 int i;
2065 u32 vp_id = 0;
2066 struct vxge_hw_vpath_attr attr;
2067
2068 for (i = 0; i < vdev->no_of_vpath; i++) {
2069 vxge_assert(vdev->vpaths[i].is_configured);
2070 attr.vp_id = vdev->vpaths[i].device_id;
2071 attr.fifo_attr.callback = vxge_xmit_compl;
2072 attr.fifo_attr.txdl_term = vxge_tx_term;
2073 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2074 attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;
2075
2076 attr.ring_attr.callback = vxge_rx_1b_compl;
2077 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2078 attr.ring_attr.rxd_term = vxge_rx_term;
2079 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2080 attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;
2081
2082 vdev->vpaths[i].ring.ndev = vdev->ndev;
2083 vdev->vpaths[i].ring.pdev = vdev->pdev;
2084 status = vxge_hw_vpath_open(vdev->devh, &attr,
2085 &(vdev->vpaths[i].handle));
2086 if (status == VXGE_HW_OK) {
2087 vdev->vpaths[i].fifo.handle =
2088 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2089 vdev->vpaths[i].ring.handle =
2090 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2091 vdev->vpaths[i].fifo.tx_steering_type =
2092 vdev->config.tx_steering_type;
2093 vdev->vpaths[i].fifo.ndev = vdev->ndev;
2094 vdev->vpaths[i].fifo.pdev = vdev->pdev;
2095 vdev->vpaths[i].fifo.indicate_max_pkts =
2096 vdev->config.fifo_indicate_max_pkts;
2097 vdev->vpaths[i].ring.rx_vector_no = 0;
2098 vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
2099 vdev->vpaths[i].is_open = 1;
2100 vdev->vp_handles[i] = vdev->vpaths[i].handle;
2101 vdev->vpaths[i].ring.gro_enable =
2102 vdev->config.gro_enable;
2103 vdev->vpaths[i].ring.vlan_tag_strip =
2104 vdev->vlan_tag_strip;
2105 vdev->stats.vpaths_open++;
2106 } else {
2107 vdev->stats.vpath_open_fail++;
2108 vxge_debug_init(VXGE_ERR,
2109 "%s: vpath: %d failed to open "
2110 "with status: %d",
2111 vdev->ndev->name, vdev->vpaths[i].device_id,
2112 status);
2113 vxge_close_vpaths(vdev, 0);
2114 return -EPERM;
2115 }
2116
2117 vp_id =
2118 ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
2119 vpath->vp_id;
2120 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2121 }
2122 return VXGE_HW_OK;
2123}
2124
2125/*
2126 * vxge_isr_napi
2127 * @irq: the irq of the device.
2128 * @dev_id: a void pointer to the hldev structure of the Titan device
2130 *
2131 * This function is the ISR handler of the device when napi is enabled. It
2132 * identifies the reason for the interrupt and calls the relevant service
2133 * routines.
2134 */
2135static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2136{
2137 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)dev_id;
2138 struct vxgedev *vdev;
2139 struct net_device *dev;
2140 u64 reason;
2141 enum vxge_hw_status status;
2142
2143 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2144
2145 dev = hldev->ndev;
2146 vdev = netdev_priv(dev);
2147
2148 if (pci_channel_offline(vdev->pdev))
2149 return IRQ_NONE;
2150
2151 if (unlikely(!is_vxge_card_up(vdev)))
2152 return IRQ_NONE;
2153
2154 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
2155 &reason);
2156 if (status == VXGE_HW_OK) {
2157 vxge_hw_device_mask_all(hldev);
2158
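		/* vpaths_deployed is built with vxge_mBIT() (MSB-first), so
		 * shift it down here to line it up with the per-vpath
		 * traffic interrupt status bits before testing the reason
		 * mask. */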
2159 if (reason &
2160 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2161 vdev->vpaths_deployed >>
2162 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2163
2164 vxge_hw_device_clear_tx_rx(hldev);
2165 napi_schedule(&vdev->napi);
2166 vxge_debug_intr(VXGE_TRACE,
2167 "%s:%d Exiting...", __func__, __LINE__);
2168 return IRQ_HANDLED;
2169 } else
2170 vxge_hw_device_unmask_all(hldev);
2171 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2172 (status == VXGE_HW_ERR_CRITICAL) ||
2173 (status == VXGE_HW_ERR_FIFO))) {
2174 vxge_hw_device_mask_all(hldev);
2175 vxge_hw_device_flush_io(hldev);
2176 return IRQ_HANDLED;
2177 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2178 return IRQ_HANDLED;
2179
2180 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2181 return IRQ_NONE;
2182}
2183
2184#ifdef CONFIG_PCI_MSI
2185
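/*
 * MSI-X usage sketch, inferred from vxge_alloc_msix()/vxge_enable_msix()
 * below: each vpath claims VXGE_HW_VPATH_MSIX_ACTIVE consecutive vector
 * ids -- the first for its Tx fifo, the second for its Rx ring -- and
 * one extra vector at the end of the table is shared by all vpaths for
 * alarms.  Assuming VXGE_HW_VPATH_MSIX_ACTIVE == 4 and two vpaths, the
 * vector ids would be:
 *
 *	vpath 0: Tx = 0, Rx = 1
 *	vpath 1: Tx = 4, Rx = 5
 *	alarm:   4 * 2 - 2 = 6
 */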
2186static irqreturn_t
2187vxge_tx_msix_handle(int irq, void *dev_id)
2188{
2189 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2190
2191 VXGE_COMPLETE_VPATH_TX(fifo);
2192
2193 return IRQ_HANDLED;
2194}
2195
2196static irqreturn_t
2197vxge_rx_msix_napi_handle(int irq, void *dev_id)
2198{
2199 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2200
2201 /* MSIX_IDX for Rx is 1 */
2202 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2203 ring->rx_vector_no);
2204
2205 napi_schedule(&ring->napi);
2206 return IRQ_HANDLED;
2207}
2208
2209static irqreturn_t
2210vxge_alarm_msix_handle(int irq, void *dev_id)
2211{
2212 int i;
2213 enum vxge_hw_status status;
2214 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2215 struct vxgedev *vdev = vpath->vdev;
2216 int alarm_msix_id =
2217 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2218
2219 for (i = 0; i < vdev->no_of_vpath; i++) {
2220 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
2221 alarm_msix_id);
2222
2223 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2224 vdev->exec_mode);
2225 if (status == VXGE_HW_OK) {
2226
2227 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2228 alarm_msix_id);
2229 continue;
2230 }
2231 vxge_debug_intr(VXGE_ERR,
2232 "%s: vxge_hw_vpath_alarm_process failed %x ",
2233 VXGE_DRIVER_NAME, status);
2234 }
2235 return IRQ_HANDLED;
2236}
2237
2238static int vxge_alloc_msix(struct vxgedev *vdev)
2239{
2240 int j, i, ret = 0;
2241 int intr_cnt = 0;
2242 int alarm_msix_id = 0, msix_intr_vect = 0;
2243 vdev->intr_cnt = 0;
2244
2245 /* Tx/Rx MSIX Vectors count */
2246 vdev->intr_cnt = vdev->no_of_vpath * 2;
2247
2248 /* Alarm MSIX Vectors count */
2249 vdev->intr_cnt++;
2250
2251 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2252 vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
2253 GFP_KERNEL);
2254 if (!vdev->entries) {
2255 vxge_debug_init(VXGE_ERR,
2256 "%s: memory allocation failed",
2257 VXGE_DRIVER_NAME);
2258 return -ENOMEM;
2259 }
2260
2261 vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
2262 GFP_KERNEL);
2263 if (!vdev->vxge_entries) {
2264 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2265 VXGE_DRIVER_NAME);
2266 kfree(vdev->entries);
2267 return -ENOMEM;
2268 }
2269
2270 /* Last vector in the list is used for alarm */
2271 alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2272 for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
2273
2274 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2275
2276 /* Initialize the fifo vector */
2277 vdev->entries[j].entry = msix_intr_vect;
2278 vdev->vxge_entries[j].entry = msix_intr_vect;
2279 vdev->vxge_entries[j].in_use = 0;
2280 j++;
2281
2282 /* Initialize the ring vector */
2283 vdev->entries[j].entry = msix_intr_vect + 1;
2284 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2285 vdev->vxge_entries[j].in_use = 0;
2286 j++;
2287 }
2288
2289 /* Initialize the alarm vector */
2290 vdev->entries[j].entry = alarm_msix_id;
2291 vdev->vxge_entries[j].entry = alarm_msix_id;
2292 vdev->vxge_entries[j].in_use = 0;
2293
2294 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2295 /* If the driver's request exceeds the number of available IRQs,
2296 * retry with a smaller number.
2297 */
2298 if (ret > 0) {
2299 vxge_debug_init(VXGE_ERR,
2300 "%s: MSI-X enable failed for %d vectors, available: %d",
2301 VXGE_DRIVER_NAME, intr_cnt, ret);
2302 vdev->max_vpath_supported = vdev->no_of_vpath;
2303 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2304
2305 /* Reset the alarm vector setting */
2306 vdev->entries[j].entry = 0;
2307 vdev->vxge_entries[j].entry = 0;
2308
2309 /* Initialize the alarm vector with new setting */
2310 vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
2311 vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
2312 vdev->vxge_entries[intr_cnt - 1].in_use = 0;
2313
2314 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2315 if (!ret)
2316 vxge_debug_init(VXGE_ERR,
2317 "%s: MSI-X enabled for %d vectors",
2318 VXGE_DRIVER_NAME, intr_cnt);
2319 }
2320
2321 if (ret) {
2322 vxge_debug_init(VXGE_ERR,
2323 "%s: MSI-X enable failed for %d vectors, ret: %d",
2324 VXGE_DRIVER_NAME, intr_cnt, ret);
2325 kfree(vdev->entries);
2326 kfree(vdev->vxge_entries);
2327 vdev->entries = NULL;
2328 vdev->vxge_entries = NULL;
2329 return -ENODEV;
2330 }
2331 return 0;
2332}
2333
2334static int vxge_enable_msix(struct vxgedev *vdev)
2335{
2336
2337 int i, ret = 0;
2338 enum vxge_hw_status status;
2339 /* 0 - Tx, 1 - Rx */
2340 int tim_msix_id[4];
2341 int alarm_msix_id = 0, msix_intr_vect = 0;
2342 vdev->intr_cnt = 0;
2343
2344 /* allocate msix vectors */
2345 ret = vxge_alloc_msix(vdev);
2346 if (!ret) {
2347 /* Last vector in the list is used for alarm */
2348 alarm_msix_id =
2349 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2350 for (i = 0; i < vdev->no_of_vpath; i++) {
2351
2352 /* If the fifo or ring is not enabled,
2353 the MSI-X vector for it should be set to 0.
2354 Hence this array is initialized to all 0s.
2355 */
2356 memset(tim_msix_id, 0, sizeof(tim_msix_id));
2357 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2358 tim_msix_id[0] = msix_intr_vect;
2359
2360 tim_msix_id[1] = msix_intr_vect + 1;
2361 vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
2362
2363 status = vxge_hw_vpath_msix_set(
2364 vdev->vpaths[i].handle,
2365 tim_msix_id, alarm_msix_id);
2366 if (status != VXGE_HW_OK) {
2367 vxge_debug_init(VXGE_ERR,
2368 "vxge_hw_vpath_msix_set "
2369 "failed with status : %x", status);
2370 kfree(vdev->entries);
2371 kfree(vdev->vxge_entries);
2372 pci_disable_msix(vdev->pdev);
2373 return -ENODEV;
2374 }
2375 }
2376 }
2377
2378 return ret;
2379}
2380
2381static void vxge_rem_msix_isr(struct vxgedev *vdev)
2382{
2383 int intr_cnt;
2384
2385 for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
2386 intr_cnt++) {
2387 if (vdev->vxge_entries[intr_cnt].in_use) {
2388 synchronize_irq(vdev->entries[intr_cnt].vector);
2389 free_irq(vdev->entries[intr_cnt].vector,
2390 vdev->vxge_entries[intr_cnt].arg);
2391 vdev->vxge_entries[intr_cnt].in_use = 0;
2392 }
2393 }
2394
2395 kfree(vdev->entries);
2396 kfree(vdev->vxge_entries);
2397 vdev->entries = NULL;
2398 vdev->vxge_entries = NULL;
2399
2400 if (vdev->config.intr_type == MSI_X)
2401 pci_disable_msix(vdev->pdev);
2402}
2403#endif
2404
2405static void vxge_rem_isr(struct vxgedev *vdev)
2406{
2407 struct __vxge_hw_device *hldev;
2408 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2409
2410#ifdef CONFIG_PCI_MSI
2411 if (vdev->config.intr_type == MSI_X) {
2412 vxge_rem_msix_isr(vdev);
2413 } else
2414#endif
2415 if (vdev->config.intr_type == INTA) {
2416 synchronize_irq(vdev->pdev->irq);
2417 free_irq(vdev->pdev->irq, hldev);
2418 }
2419}
2420
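/*
 * Register the interrupt handlers.  MSI-X is tried first (a Tx and an
 * Rx vector per vpath plus the shared alarm vector); if anything fails
 * the driver falls back to a single shared INTA line with one vpath.
 */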
2421static int vxge_add_isr(struct vxgedev *vdev)
2422{
2423 int ret = 0;
2424 struct __vxge_hw_device *hldev =
2425 (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2426#ifdef CONFIG_PCI_MSI
2427 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2428 u64 function_mode = vdev->config.device_hw_info.function_mode;
2429 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2430
2431 if (vdev->config.intr_type == MSI_X)
2432 ret = vxge_enable_msix(vdev);
2433
2434 if (ret) {
2435 vxge_debug_init(VXGE_ERR,
2436 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2437 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2438 test_and_set_bit(__VXGE_STATE_CARD_UP,
2439 &driver_config->inta_dev_open))
2440 return VXGE_HW_FAIL;
2441 else {
2442 vxge_debug_init(VXGE_ERR,
2443 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2444 vdev->config.intr_type = INTA;
2445 vxge_hw_device_set_intr_type(vdev->devh,
2446 VXGE_HW_INTR_MODE_IRQLINE);
2447 vxge_close_vpaths(vdev, 1);
2448 vdev->no_of_vpath = 1;
2449 vdev->stats.vpaths_open = 1;
2450 }
2451 }
2452
2453 if (vdev->config.intr_type == MSI_X) {
2454 for (intr_idx = 0;
2455 intr_idx < (vdev->no_of_vpath *
2456 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2457
2458 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2459 irq_req = 0;
2460
2461 switch (msix_idx) {
2462 case 0:
2463 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2464 "%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
2465 vdev->ndev->name, pci_fun, vp_idx,
2466 vdev->entries[intr_cnt].entry);
2467 ret = request_irq(
2468 vdev->entries[intr_cnt].vector,
2469 vxge_tx_msix_handle, 0,
2470 vdev->desc[intr_cnt],
2471 &vdev->vpaths[vp_idx].fifo);
2472 vdev->vxge_entries[intr_cnt].arg =
2473 &vdev->vpaths[vp_idx].fifo;
2474 irq_req = 1;
2475 break;
2476 case 1:
2477 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2478 "%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
2479 vdev->ndev->name, pci_fun, vp_idx,
2480 vdev->entries[intr_cnt].entry);
2481 ret = request_irq(
2482 vdev->entries[intr_cnt].vector,
2483 vxge_rx_msix_napi_handle,
2484 0,
2485 vdev->desc[intr_cnt],
2486 &vdev->vpaths[vp_idx].ring);
2487 vdev->vxge_entries[intr_cnt].arg =
2488 &vdev->vpaths[vp_idx].ring;
2489 irq_req = 1;
2490 break;
2491 }
2492
2493 if (ret) {
2494 vxge_debug_init(VXGE_ERR,
2495 "%s: MSIX - %d Registration failed",
2496 vdev->ndev->name, intr_cnt);
2497 vxge_rem_msix_isr(vdev);
2498 if ((function_mode ==
2499 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2500 test_and_set_bit(__VXGE_STATE_CARD_UP,
2501 &driver_config->inta_dev_open))
2502 return VXGE_HW_FAIL;
2503 else {
2504 vxge_hw_device_set_intr_type(
2505 vdev->devh,
2506 VXGE_HW_INTR_MODE_IRQLINE);
2507 vdev->config.intr_type = INTA;
2508 vxge_debug_init(VXGE_ERR,
2509 "%s: Defaulting to INTA"
2510 , vdev->ndev->name);
2511 vxge_close_vpaths(vdev, 1);
2512 vdev->no_of_vpath = 1;
2513 vdev->stats.vpaths_open = 1;
2514 goto INTA_MODE;
2515 }
2516 }
2517
2518 if (irq_req) {
2519 /* We requested this MSI-X interrupt */
2520 vdev->vxge_entries[intr_cnt].in_use = 1;
2521 vxge_hw_vpath_msix_unmask(
2522 vdev->vpaths[vp_idx].handle,
2523 intr_idx);
2524 intr_cnt++;
2525 }
2526
2527 /* Point to next vpath handler */
2528 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0)
2529 && (vp_idx < (vdev->no_of_vpath - 1)))
2530 vp_idx++;
2531 }
2532
2533 intr_cnt = vdev->max_vpath_supported * 2;
2534 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2535 "%s:vxge Alarm fn: %d MSI-X: %d",
2536 vdev->ndev->name, pci_fun,
2537 vdev->entries[intr_cnt].entry);
2538 /* For Alarm interrupts */
2539 ret = request_irq(vdev->entries[intr_cnt].vector,
2540 vxge_alarm_msix_handle, 0,
2541 vdev->desc[intr_cnt],
2542 &vdev->vpaths[vp_idx]);
2543 if (ret) {
2544 vxge_debug_init(VXGE_ERR,
2545 "%s: MSIX - %d Registration failed",
2546 vdev->ndev->name, intr_cnt);
2547 vxge_rem_msix_isr(vdev);
2548 if ((function_mode ==
2549 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2550 test_and_set_bit(__VXGE_STATE_CARD_UP,
2551 &driver_config->inta_dev_open))
2552 return VXGE_HW_FAIL;
2553 else {
2554 vxge_hw_device_set_intr_type(vdev->devh,
2555 VXGE_HW_INTR_MODE_IRQLINE);
2556 vdev->config.intr_type = INTA;
2557 vxge_debug_init(VXGE_ERR,
2558 "%s: Defaulting to INTA",
2559 vdev->ndev->name);
2560 vxge_close_vpaths(vdev, 1);
2561 vdev->no_of_vpath = 1;
2562 vdev->stats.vpaths_open = 1;
2563 goto INTA_MODE;
2564 }
2565 }
2566
2567 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2568 intr_idx - 2);
2569 vdev->vxge_entries[intr_cnt].in_use = 1;
2570 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
2571 }
2572INTA_MODE:
2573#endif
2574 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2575
2576 if (vdev->config.intr_type == INTA) {
2577 ret = request_irq((int) vdev->pdev->irq,
2578 vxge_isr_napi,
2579 IRQF_SHARED, vdev->desc[0], hldev);
2580 if (ret) {
2581 vxge_debug_init(VXGE_ERR,
2582 "%s %s-%d: ISR registration failed",
2583 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2584 return -ENODEV;
2585 }
2586 vxge_debug_init(VXGE_TRACE,
2587 "new %s-%d line allocated",
2588 "IRQ", vdev->pdev->irq);
2589 }
2590
2591 return VXGE_HW_OK;
2592}
2593
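/*
 * Timer callback: reset every vpath flagged in vdev->vp_reset (set by
 * the lockup detector and the critical-error handler), then re-arm to
 * run again in half a second.
 */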
2594static void vxge_poll_vp_reset(unsigned long data)
2595{
2596 struct vxgedev *vdev = (struct vxgedev *)data;
2597 int i, j = 0;
2598
2599 for (i = 0; i < vdev->no_of_vpath; i++) {
2600 if (test_bit(i, &vdev->vp_reset)) {
2601 vxge_reset_vpath(vdev, i);
2602 j++;
2603 }
2604 }
2605 if (j && (vdev->config.intr_type != MSI_X)) {
2606 vxge_hw_device_unmask_all(vdev->devh);
2607 vxge_hw_device_flush_io(vdev->devh);
2608 }
2609
2610 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2611}
2612
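/*
 * Timer callback: rudimentary Rx lockup detector.  If a ring received
 * no frames since the last poll and the hardware leak check failed two
 * polls in a row, flag the vpath for reset and stop its Tx queue; the
 * actual reset happens in vxge_poll_vp_reset() above.
 */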
2613static void vxge_poll_vp_lockup(unsigned long data)
2614{
2615 struct vxgedev *vdev = (struct vxgedev *)data;
2616 int i;
2617 struct vxge_ring *ring;
2618 enum vxge_hw_status status = VXGE_HW_OK;
2619
2620 for (i = 0; i < vdev->no_of_vpath; i++) {
2621 ring = &vdev->vpaths[i].ring;
2622 /* Did this vpath receive any packets? */
2623 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
2624 status = vxge_hw_vpath_check_leak(ring->handle);
2625
2626 /* Was the leak check failing last time as well? */
2627 if ((VXGE_HW_FAIL == status) &&
2628 (VXGE_HW_FAIL == ring->last_status)) {
2629
2630 /* schedule vpath reset */
2631 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2632
2633 /* disable interrupts for this vpath */
2634 vxge_vpath_intr_disable(vdev, i);
2635
2636 /* stop the queue for this vpath */
2637 vxge_stop_tx_queue(&vdev->vpaths[i].
2638 fifo);
2639 continue;
2640 }
2641 }
2642 }
2643 ring->stats.prev_rx_frms = ring->stats.rx_frms;
2644 ring->last_status = status;
2645 }
2646
2647 /* Check again in one millisecond */
2648 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2649}
2650
2651/**
2652 * vxge_open
2653 * @dev: pointer to the device structure.
2654 *
2655 * This function is the open entry point of the driver. It mainly calls a
2656 * function to allocate Rx buffers and inserts them into the buffer
2657 * descriptors and then enables the Rx part of the NIC.
2658 * Return value: '0' on success and an appropriate (-)ve integer as
2659 * defined in errno.h file on failure.
2660 */
2661int
2662vxge_open(struct net_device *dev)
2663{
2664 enum vxge_hw_status status;
2665 struct vxgedev *vdev;
2666 struct __vxge_hw_device *hldev;
2667 int ret = 0;
2668 int i;
2669 u64 val64, function_mode;
2670 vxge_debug_entryexit(VXGE_TRACE,
2671 "%s: %s:%d", dev->name, __func__, __LINE__);
2672
2673 vdev = (struct vxgedev *)netdev_priv(dev);
2674 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2675 function_mode = vdev->config.device_hw_info.function_mode;
2676
2677 /* Make sure the link is off by default every time the NIC
2678 * is initialized */
2679 netif_carrier_off(dev);
2680
2681 /* Check for another device already open with INTA */
2682 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2683 test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
2684 ret = -EPERM;
2685 goto out0;
2686 }
2687
2688 /* Open VPATHs */
2689 status = vxge_open_vpaths(vdev);
2690 if (status != VXGE_HW_OK) {
2691 vxge_debug_init(VXGE_ERR,
2692 "%s: fatal: Vpath open failed", vdev->ndev->name);
2693 ret = -EPERM;
2694 goto out0;
2695 }
2696
2697 vdev->mtu = dev->mtu;
2698
2699 status = vxge_add_isr(vdev);
2700 if (status != VXGE_HW_OK) {
2701 vxge_debug_init(VXGE_ERR,
2702 "%s: fatal: ISR add failed", dev->name);
2703 ret = -EPERM;
2704 goto out1;
2705 }
2706
2707
2708 if (vdev->config.intr_type != MSI_X) {
2709 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2710 vdev->config.napi_weight);
2711 napi_enable(&vdev->napi);
2712 } else {
2713 for (i = 0; i < vdev->no_of_vpath; i++) {
2714 netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
2715 vxge_poll_msix, vdev->config.napi_weight);
2716 napi_enable(&vdev->vpaths[i].ring.napi);
2717 }
2718 }
2719
2720 /* configure RTH */
2721 if (vdev->config.rth_steering) {
2722 status = vxge_rth_configure(vdev);
2723 if (status != VXGE_HW_OK) {
2724 vxge_debug_init(VXGE_ERR,
2725 "%s: fatal: RTH configuration failed",
2726 dev->name);
2727 ret = -EPERM;
2728 goto out2;
2729 }
2730 }
2731
2732 for (i = 0; i < vdev->no_of_vpath; i++) {
2733 /* set initial mtu before enabling the device */
2734 status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
2735 vdev->mtu);
2736 if (status != VXGE_HW_OK) {
2737 vxge_debug_init(VXGE_ERR,
2738 "%s: fatal: can not set new MTU", dev->name);
2739 ret = -EPERM;
2740 goto out2;
2741 }
2742 }
2743
2744 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2745 vxge_debug_init(vdev->level_trace,
2746 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2747 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2748
2749 /* Reprogram the DA table with populated mac addresses */
2750 for (i = 0; i < vdev->no_of_vpath; i++) {
2751 vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
2752 vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
2753 }
2754
2755 /* Enable the vpaths to sniff all unicast/multicast traffic that is
2756 * not addressed to them. Promiscuous mode is allowed for the PF only.
2757 */
2758
2759 val64 = 0;
2760 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2761 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2762
2763 vxge_hw_mgmt_reg_write(vdev->devh,
2764 vxge_hw_mgmt_reg_type_mrpcim,
2765 0,
2766 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2767 rxmac_authorize_all_addr),
2768 val64);
2769
2770 vxge_hw_mgmt_reg_write(vdev->devh,
2771 vxge_hw_mgmt_reg_type_mrpcim,
2772 0,
2773 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2774 rxmac_authorize_all_vid),
2775 val64);
2776
2777 vxge_set_multicast(dev);
2778
2779 /* Enable bcast and mcast for all vpaths */
2780 for (i = 0; i < vdev->no_of_vpath; i++) {
2781 status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
2782 if (status != VXGE_HW_OK)
2783 vxge_debug_init(VXGE_ERR,
2784 "%s : Can not enable bcast for vpath "
2785 "id %d", dev->name, i);
2786 if (vdev->config.addr_learn_en) {
2787 status =
2788 vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
2789 if (status != VXGE_HW_OK)
2790 vxge_debug_init(VXGE_ERR,
2791 "%s : Can not enable mcast for vpath "
2792 "id %d", dev->name, i);
2793 }
2794 }
2795
2796 vxge_hw_device_setpause_data(vdev->devh, 0,
2797 vdev->config.tx_pause_enable,
2798 vdev->config.rx_pause_enable);
2799
2800 if (vdev->vp_reset_timer.function == NULL)
2801 vxge_os_timer(vdev->vp_reset_timer,
2802 vxge_poll_vp_reset, vdev, (HZ/2));
2803
2804 if (vdev->vp_lockup_timer.function == NULL)
2805 vxge_os_timer(vdev->vp_lockup_timer,
2806 vxge_poll_vp_lockup, vdev, (HZ/2));
2807
2808 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2809
2810 smp_wmb();
2811
2812 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2813 netif_carrier_on(vdev->ndev);
2814 printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
2815 vdev->stats.link_up++;
2816 }
2817
2818 vxge_hw_device_intr_enable(vdev->devh);
2819
2820 smp_wmb();
2821
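	/* Enable each vpath and ring its Rx doorbell so the hardware is
	 * told about the initially posted receive descriptors (an
	 * interpretation of vxge_hw_vpath_rx_doorbell_init()). */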
2822 for (i = 0; i < vdev->no_of_vpath; i++) {
2823 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
2824 smp_wmb();
2825 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
2826 }
2827
2828 vxge_start_all_tx_queue(vdev);
2829 goto out0;
2830
2831out2:
2832 vxge_rem_isr(vdev);
2833
2834 /* Disable napi */
2835 if (vdev->config.intr_type != MSI_X)
2836 napi_disable(&vdev->napi);
2837 else {
2838 for (i = 0; i < vdev->no_of_vpath; i++)
2839 napi_disable(&vdev->vpaths[i].ring.napi);
2840 }
2841
2842out1:
2843 vxge_close_vpaths(vdev, 0);
2844out0:
2845 vxge_debug_entryexit(VXGE_TRACE,
2846 "%s: %s:%d Exiting...",
2847 dev->name, __func__, __LINE__);
2848 return ret;
2849}
2850
2851/* Loop through the mac address list and delete all the entries */
2852void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2853{
2854
2855 struct list_head *entry, *next;
2856 if (list_empty(&vpath->mac_addr_list))
2857 return;
2858
2859 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2860 list_del(entry);
2861 kfree((struct vxge_mac_addrs *)entry);
2862 }
2863}
2864
2865static void vxge_napi_del_all(struct vxgedev *vdev)
2866{
2867 int i;
2868 if (vdev->config.intr_type != MSI_X)
2869 netif_napi_del(&vdev->napi);
2870 else {
2871 for (i = 0; i < vdev->no_of_vpath; i++)
2872 netif_napi_del(&vdev->vpaths[i].ring.napi);
2873 }
2874 return;
2875}
2876
2877int do_vxge_close(struct net_device *dev, int do_io)
2878{
2879 enum vxge_hw_status status;
2880 struct vxgedev *vdev;
2881 struct __vxge_hw_device *hldev;
2882 int i;
2883 u64 val64, vpath_vector;
2884 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2885 dev->name, __func__, __LINE__);
2886
2887 vdev = (struct vxgedev *)netdev_priv(dev);
2888 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2889
2890 /* If vxge_handle_crit_err task is executing,
2891 * wait till it completes. */
2892 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2893 msleep(50);
2894
2895 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2896 if (do_io) {
2897 /* Put the vpath back in normal mode */
2898 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2899 status = vxge_hw_mgmt_reg_read(vdev->devh,
2900 vxge_hw_mgmt_reg_type_mrpcim,
2901 0,
2902 (ulong)offsetof(
2903 struct vxge_hw_mrpcim_reg,
2904 rts_mgr_cbasin_cfg),
2905 &val64);
2906
2907 if (status == VXGE_HW_OK) {
2908 val64 &= ~vpath_vector;
2909 status = vxge_hw_mgmt_reg_write(vdev->devh,
2910 vxge_hw_mgmt_reg_type_mrpcim,
2911 0,
2912 (ulong)offsetof(
2913 struct vxge_hw_mrpcim_reg,
2914 rts_mgr_cbasin_cfg),
2915 val64);
2916 }
2917
2918 /* Remove function 0 from promiscuous mode */
2919 vxge_hw_mgmt_reg_write(vdev->devh,
2920 vxge_hw_mgmt_reg_type_mrpcim,
2921 0,
2922 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2923 rxmac_authorize_all_addr),
2924 0);
2925
2926 vxge_hw_mgmt_reg_write(vdev->devh,
2927 vxge_hw_mgmt_reg_type_mrpcim,
2928 0,
2929 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2930 rxmac_authorize_all_vid),
2931 0);
2932
2933 smp_wmb();
2934 }
2935 del_timer_sync(&vdev->vp_lockup_timer);
2936
2937 del_timer_sync(&vdev->vp_reset_timer);
2938
2939 /* Disable napi */
2940 if (vdev->config.intr_type != MSI_X)
2941 napi_disable(&vdev->napi);
2942 else {
2943 for (i = 0; i < vdev->no_of_vpath; i++)
2944 napi_disable(&vdev->vpaths[i].ring.napi);
2945 }
2946
2947 netif_carrier_off(vdev->ndev);
2948 printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
2949 vxge_stop_all_tx_queue(vdev);
2950
2951 /* Note that at this point xmit() is stopped by upper layer */
2952 if (do_io)
2953 vxge_hw_device_intr_disable(vdev->devh);
2954
2955 mdelay(1000);
2956
2957 vxge_rem_isr(vdev);
2958
2959 vxge_napi_del_all(vdev);
2960
2961 if (do_io)
2962 vxge_reset_all_vpaths(vdev);
2963
2964 vxge_close_vpaths(vdev, 0);
2965
2966 vxge_debug_entryexit(VXGE_TRACE,
2967 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2968
2969 clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
2970 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2971
2972 return 0;
2973}
2974
2975/**
2976 * vxge_close
2977 * @dev: device pointer.
2978 *
2979 * This is the stop entry point of the driver. It needs to undo exactly
2980 * whatever was done by the open entry point, thus it's usually referred to
2981 * as the close function. Among other things, this function stops the
2982 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2983 * Return value: '0' on success and an appropriate (-)ve integer as
2984 * defined in errno.h file on failure.
2985 */
2986int
2987vxge_close(struct net_device *dev)
2988{
2989 do_vxge_close(dev, 1);
2990 return 0;
2991}
2992
2993/**
2994 * vxge_change_mtu
2995 * @dev: net device pointer.
2996 * @new_mtu :the new MTU size for the device.
2997 *
2998 * A driver entry point to change MTU size for the device. Before changing
2999 * the MTU the device must be stopped.
3000 */
3001static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3002{
3003 struct vxgedev *vdev = netdev_priv(dev);
3004
3005 vxge_debug_entryexit(vdev->level_trace,
3006 "%s:%d", __func__, __LINE__);
3007 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
3008 vxge_debug_init(vdev->level_err,
3009 "%s: mtu size is invalid", dev->name);
3010 return -EPERM;
3011 }
3012
3013 /* check if device is down already */
3014 if (unlikely(!is_vxge_card_up(vdev))) {
3015 /* just store new value, will use later on open() */
3016 dev->mtu = new_mtu;
3017 vxge_debug_init(vdev->level_err,
3018 "%s", "device is down on MTU change");
3019 return 0;
3020 }
3021
3022 vxge_debug_init(vdev->level_trace,
3023 "trying to apply new MTU %d", new_mtu);
3024
3025 if (vxge_close(dev))
3026 return -EIO;
3027
3028 dev->mtu = new_mtu;
3029 vdev->mtu = new_mtu;
3030
3031 if (vxge_open(dev))
3032 return -EIO;
3033
3034 vxge_debug_init(vdev->level_trace,
3035 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3036
3037 vxge_debug_entryexit(vdev->level_trace,
3038 "%s:%d Exiting...", __func__, __LINE__);
3039
3040 return 0;
3041}
3042
3043/**
3044 * vxge_get_stats
3045 * @dev: pointer to the device structure
3046 *
3047 * Updates the device statistics structure by aggregating the per-vpath
3048 * ring and fifo counters into the driver's net_device_stats structure,
3049 * and returns a pointer to it.
3050 */
3051static struct net_device_stats *
3052vxge_get_stats(struct net_device *dev)
3053{
3054 struct vxgedev *vdev;
3055 struct net_device_stats *net_stats;
3056 int k;
3057
3058 vdev = netdev_priv(dev);
3059
3060 net_stats = &vdev->stats.net_stats;
3061
3062 memset(net_stats, 0, sizeof(struct net_device_stats));
3063
3064 for (k = 0; k < vdev->no_of_vpath; k++) {
3065 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
3066 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
3067 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
3068 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
3069 net_stats->rx_dropped +=
3070 vdev->vpaths[k].ring.stats.rx_dropped;
3071
3072 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
3073 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
3074 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
3075 }
3076
3077 return net_stats;
3078}
3079
3080/**
3081 * vxge_ioctl
3082 * @dev: Device pointer.
3083 * @ifr: An IOCTL specific structure, that can contain a pointer to
3084 * a proprietary structure used to pass information to the driver.
3085 * @cmd: This is used to distinguish between the different commands that
3086 * can be passed to the IOCTL functions.
3087 *
3088 * Entry point for the Ioctl.
3089 */
3090static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3091{
3092 return -EOPNOTSUPP;
3093}
3094
3095/**
3096 * vxge_tx_watchdog
3097 * @dev: pointer to net device structure
3098 *
3099 * Watchdog for transmit side.
3100 * This function is triggered if the Tx Queue is stopped
3101 * for a pre-defined amount of time when the Interface is still up.
3102 */
3103static void
3104vxge_tx_watchdog(struct net_device *dev)
3105{
3106 struct vxgedev *vdev;
3107
3108 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3109
3110 vdev = (struct vxgedev *)netdev_priv(dev);
3111
3112 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3113
3114 vxge_reset(vdev);
3115 vxge_debug_entryexit(VXGE_TRACE,
3116 "%s:%d Exiting...", __func__, __LINE__);
3117}
3118
3119/**
3120 * vxge_vlan_rx_register
3121 * @dev: net device pointer.
3122 * @grp: vlan group
3123 *
3124 * Vlan group registration
3125 */
3126static void
3127vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3128{
3129 struct vxgedev *vdev;
3130 struct vxge_vpath *vpath;
3131 int vp;
3132 u64 vid;
3133 enum vxge_hw_status status;
3134 int i;
3135
3136 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3137
3138 vdev = (struct vxgedev *)netdev_priv(dev);
3139
3140 vpath = &vdev->vpaths[0];
3141 if ((NULL == grp) && (vpath->is_open)) {
3142 /* Get the first vlan */
3143 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3144
3145 while (status == VXGE_HW_OK) {
3146
3147 /* Delete this vlan from the vid table */
3148 for (vp = 0; vp < vdev->no_of_vpath; vp++) {
3149 vpath = &vdev->vpaths[vp];
3150 if (!vpath->is_open)
3151 continue;
3152
3153 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3154 }
3155
3156 /* Get the next vlan to be deleted */
3157 vpath = &vdev->vpaths[0];
3158 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3159 }
3160 }
3161
3162 vdev->vlgrp = grp;
3163
3164 for (i = 0; i < vdev->no_of_vpath; i++) {
3165 if (vdev->vpaths[i].is_configured)
3166 vdev->vpaths[i].ring.vlgrp = grp;
3167 }
3168
3169 vxge_debug_entryexit(VXGE_TRACE,
3170 "%s:%d Exiting...", __func__, __LINE__);
3171}
3172
3173/**
3174 * vxge_vlan_rx_add_vid
3175 * @dev: net device pointer.
3176 * @vid: vid
3177 *
3178 * Add the vlan id to the devices vlan id table
3179 */
3180static void
3181vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3182{
3183 struct vxgedev *vdev;
3184 struct vxge_vpath *vpath;
3185 int vp_id;
3186
3187 vdev = (struct vxgedev *)netdev_priv(dev);
3188
3189 /* Add this vlan to each vpath's vid table */
3190 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3191 vpath = &vdev->vpaths[vp_id];
3192 if (!vpath->is_open)
3193 continue;
3194 vxge_hw_vpath_vid_add(vpath->handle, vid);
3195 }
3196}
3197
3198/**
3199 * vxge_vlan_rx_kill_vid
3200 * @dev: net device pointer.
3201 * @vid: vid
3202 *
3203 * Remove the vlan id from the device's vlan id table
3204 */
3205static void
3206vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3207{
3208 struct vxgedev *vdev;
3209 struct vxge_vpath *vpath;
3210 int vp_id;
3211
3212 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3213
3214 vdev = (struct vxgedev *)netdev_priv(dev);
3215
3216 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3217
3218 /* Delete this vlan from the vid table */
3219 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3220 vpath = &vdev->vpaths[vp_id];
3221 if (!vpath->is_open)
3222 continue;
3223 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3224 }
3225 vxge_debug_entryexit(VXGE_TRACE,
3226 "%s:%d Exiting...", __func__, __LINE__);
3227}
3228
3229static const struct net_device_ops vxge_netdev_ops = {
3230 .ndo_open = vxge_open,
3231 .ndo_stop = vxge_close,
3232 .ndo_get_stats = vxge_get_stats,
3233 .ndo_start_xmit = vxge_xmit,
3234 .ndo_validate_addr = eth_validate_addr,
3235 .ndo_set_multicast_list = vxge_set_multicast,
3236
3237 .ndo_do_ioctl = vxge_ioctl,
3238
3239 .ndo_set_mac_address = vxge_set_mac_addr,
3240 .ndo_change_mtu = vxge_change_mtu,
3241 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3242 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3243 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3244
3245 .ndo_tx_timeout = vxge_tx_watchdog,
3246#ifdef CONFIG_NET_POLL_CONTROLLER
3247 .ndo_poll_controller = vxge_netpoll,
3248#endif
3249};
3250
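/*
 * vxge_device_register
 * Allocate the net_device (multiqueue when TX_MULTIQ_STEERING is
 * configured), wire up the entry points and feature flags, register it
 * with the stack and return the new vxgedev through vdev_out.
 */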
3251int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3252 struct vxge_config *config,
3253 int high_dma, int no_of_vpath,
3254 struct vxgedev **vdev_out)
3255{
3256 struct net_device *ndev;
3257 enum vxge_hw_status status = VXGE_HW_OK;
3258 struct vxgedev *vdev;
3259 int i, ret = 0, no_of_queue = 1;
3260 u64 stat;
3261
3262 *vdev_out = NULL;
3263 if (config->tx_steering_type == TX_MULTIQ_STEERING)
3264 no_of_queue = no_of_vpath;
3265
3266 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3267 no_of_queue);
3268 if (ndev == NULL) {
3269 vxge_debug_init(
3270 vxge_hw_device_trace_level_get(hldev),
3271 "%s : device allocation failed", __func__);
3272 ret = -ENODEV;
3273 goto _out0;
3274 }
3275
3276 vxge_debug_entryexit(
3277 vxge_hw_device_trace_level_get(hldev),
3278 "%s: %s:%d Entering...",
3279 ndev->name, __func__, __LINE__);
3280
3281 vdev = netdev_priv(ndev);
3282 memset(vdev, 0, sizeof(struct vxgedev));
3283
3284 vdev->ndev = ndev;
3285 vdev->devh = hldev;
3286 vdev->pdev = hldev->pdev;
3287 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3288 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3289
3290 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3291
3292 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
3293 NETIF_F_HW_VLAN_FILTER;
3294 /* Driver entry points */
3295 ndev->irq = vdev->pdev->irq;
3296 ndev->base_addr = (unsigned long) hldev->bar0;
3297
3298 ndev->netdev_ops = &vxge_netdev_ops;
3299
3300 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3301
3302 initialize_ethtool_ops(ndev);
3303
3304 /* Allocate memory for vpath */
3305 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3306 no_of_vpath, GFP_KERNEL);
3307 if (!vdev->vpaths) {
3308 vxge_debug_init(VXGE_ERR,
3309 "%s: vpath memory allocation failed",
3310 vdev->ndev->name);
3311 ret = -ENODEV;
3312 goto _out1;
3313 }
3314
3315 ndev->features |= NETIF_F_SG;
3316
3317 ndev->features |= NETIF_F_HW_CSUM;
3318 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3319 "%s : checksuming enabled", __func__);
3320
3321 if (high_dma) {
3322 ndev->features |= NETIF_F_HIGHDMA;
3323 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3324 "%s : using High DMA", __func__);
3325 }
3326
3327 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
3328
3329 if (vdev->config.gro_enable)
3330 ndev->features |= NETIF_F_GRO;
3331
3332 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
3333 ndev->real_num_tx_queues = no_of_vpath;
3334
3335#ifdef NETIF_F_LLTX
3336 ndev->features |= NETIF_F_LLTX;
3337#endif
3338
3339 for (i = 0; i < no_of_vpath; i++)
3340 spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
3341
3342 if (register_netdev(ndev)) {
3343 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3344 "%s: %s : device registration failed!",
3345 ndev->name, __func__);
3346 ret = -ENODEV;
3347 goto _out2;
3348 }
3349
3350 /* Set the factory defined MAC address initially */
3351 ndev->addr_len = ETH_ALEN;
3352
3353 /* Leave the link state off at this point; when the link-change
3354 * interrupt arrives, the state will be updated automatically to
3355 * the right state.
3356 */
3357 netif_carrier_off(ndev);
3358
3359 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3360 "%s: Ethernet device registered",
3361 ndev->name);
3362
3363 *vdev_out = vdev;
3364
3365 /* Resetting the Device stats */
3366 status = vxge_hw_mrpcim_stats_access(
3367 hldev,
3368 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3369 0,
3370 0,
3371 &stat);
3372
3373 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3374 vxge_debug_init(
3375 vxge_hw_device_trace_level_get(hldev),
3376 "%s: device stats clear returns"
3377 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3378
3379 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3380 "%s: %s:%d Exiting...",
3381 ndev->name, __func__, __LINE__);
3382
3383 return ret;
3384_out2:
3385 kfree(vdev->vpaths);
3386_out1:
3387 free_netdev(ndev);
3388_out0:
3389 return ret;
3390}
3391
3392/*
3393 * vxge_device_unregister
3394 *
3395 * This function will unregister and free network device
3396 */
3397void
3398vxge_device_unregister(struct __vxge_hw_device *hldev)
3399{
3400 struct vxgedev *vdev;
3401 struct net_device *dev;
3402 char buf[IFNAMSIZ];
3403#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3404 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3405 u32 level_trace;
3406#endif
3407
3408 dev = hldev->ndev;
3409 vdev = netdev_priv(dev);
3410#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3411 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3412 level_trace = vdev->level_trace;
3413#endif
3414 vxge_debug_entryexit(level_trace,
3415 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3416
3417 memcpy(buf, vdev->ndev->name, IFNAMSIZ);
3418
3419 /* in 2.6, unregister_netdev() will call stop() if the device is up */
3420 unregister_netdev(dev);
3421
3422 flush_scheduled_work();
3423
3424 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
3425 vxge_debug_entryexit(level_trace,
3426 "%s: %s:%d Exiting...", buf, __func__, __LINE__);
3427}
3428
3429/*
3430 * vxge_callback_crit_err
3431 *
3432 * This function is called by the alarm handler in interrupt context.
3433 * Driver must analyze it based on the event type.
3434 */
3435static void
3436vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3437 enum vxge_hw_event type, u64 vp_id)
3438{
3439 struct net_device *dev = hldev->ndev;
3440 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
3441 int vpath_idx;
3442
3443 vxge_debug_entryexit(vdev->level_trace,
3444 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3445
3446 /* Note: This event type should be used for device wide
3447 * indications only - Serious errors, Slot freeze and critical errors
3448 */
3449 vdev->cric_err_event = type;
3450
3451 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
3452 if (vdev->vpaths[vpath_idx].device_id == vp_id)
3453 break;
3454
3455 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3456 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3457 vxge_debug_init(VXGE_ERR,
3458 "%s: Slot is frozen", vdev->ndev->name);
3459 } else if (type == VXGE_HW_EVENT_SERR) {
3460 vxge_debug_init(VXGE_ERR,
3461 "%s: Encountered Serious Error",
3462 vdev->ndev->name);
3463 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3464 vxge_debug_init(VXGE_ERR,
3465 "%s: Encountered Critical Error",
3466 vdev->ndev->name);
3467 }
3468
3469 if ((type == VXGE_HW_EVENT_SERR) ||
3470 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3471 if (unlikely(vdev->exec_mode))
3472 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3473 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3474 vxge_hw_device_mask_all(hldev);
3475 if (unlikely(vdev->exec_mode))
3476 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3477 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3478 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3479
3480 if (unlikely(vdev->exec_mode))
3481 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3482 else {
3483 /* check if this vpath is already set for reset */
3484 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3485
3486 /* disable interrupts for this vpath */
3487 vxge_vpath_intr_disable(vdev, vpath_idx);
3488
3489 /* stop the queue for this vpath */
3490 vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
3491 fifo);
3492 }
3493 }
3494 }
3495
3496 vxge_debug_entryexit(vdev->level_trace,
3497 "%s: %s:%d Exiting...",
3498 vdev->ndev->name, __func__, __LINE__);
3499}
3500
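/*
 * Sanity-check the bw_percentage module parameter: if any entry is 0,
 * or the entries sum to more than 100, fall back to equal shares for
 * all vpaths; otherwise leftover bandwidth is split equally among the
 * unspecified trailing entries (with a minimum of 2% each).
 */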
3501static void verify_bandwidth(void)
3502{
3503 int i, band_width, total = 0, equal_priority = 0;
3504
3505 /* 1. If the user enters 0 for some vpath, give equal priority to all */
3506 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3507 if (bw_percentage[i] == 0) {
3508 equal_priority = 1;
3509 break;
3510 }
3511 }
3512
3513 if (!equal_priority) {
3514 /* 2. If sum exceeds 100, give equal priority to all */
3515 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3516 if (bw_percentage[i] == 0xFF)
3517 break;
3518
3519 total += bw_percentage[i];
3520 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3521 equal_priority = 1;
3522 break;
3523 }
3524 }
3525 }
3526
3527 if (!equal_priority) {
3528 /* Is all the bandwidth consumed? */
3529 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3530 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3531 /* Split the rest of the bandwidth equally among the remaining VPs */
3532 band_width =
3533 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3534 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3535 if (band_width < 2) /* min of 2% */
3536 equal_priority = 1;
3537 else {
3538 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3539 i++)
3540 bw_percentage[i] =
3541 band_width;
3542 }
3543 }
3544 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3545 equal_priority = 1;
3546 }
3547
3548 if (equal_priority) {
3549 vxge_debug_init(VXGE_ERR,
3550 "%s: Assigning equal bandwidth to all the vpaths",
3551 VXGE_DRIVER_NAME);
3552 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3553 VXGE_HW_MAX_VIRTUAL_PATHS;
3554 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3555 bw_percentage[i] = bw_percentage[0];
3556 }
3557
3558 return;
3559}
3560
3561/*
3562 * Vpath configuration: select the number of vpaths and set up each
3563 * vpath's fifo, ring and interrupt moderation (tti/rti) parameters.
 */
3564static int __devinit vxge_config_vpaths(
3565 struct vxge_hw_device_config *device_config,
3566 u64 vpath_mask, struct vxge_config *config_param)
3567{
3568 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3569 u32 txdl_size, txdl_per_memblock;
3570
3571 temp = driver_config->vpath_per_dev;
3572 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3573 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3574 /* No more CPUs. Return vpath count as zero. */
3575 if (driver_config->g_no_cpus == -1)
3576 return 0;
3577
3578 if (!driver_config->g_no_cpus)
3579 driver_config->g_no_cpus = num_online_cpus();
3580
3581 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3582 if (!driver_config->vpath_per_dev)
3583 driver_config->vpath_per_dev = 1;
3584
3585 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3586 if (!vxge_bVALn(vpath_mask, i, 1))
3587 continue;
3588 else
3589 default_no_vpath++;
3590 if (default_no_vpath < driver_config->vpath_per_dev)
3591 driver_config->vpath_per_dev = default_no_vpath;
3592
3593 driver_config->g_no_cpus = driver_config->g_no_cpus -
3594 (driver_config->vpath_per_dev * 2);
3595 if (driver_config->g_no_cpus <= 0)
3596 driver_config->g_no_cpus = -1;
3597 }
3598
3599 if (driver_config->vpath_per_dev == 1) {
3600 vxge_debug_ll_config(VXGE_TRACE,
3601 "%s: Disable tx and rx steering, "
3602 "as single vpath is configured", VXGE_DRIVER_NAME);
3603 config_param->rth_steering = NO_STEERING;
3604 config_param->tx_steering_type = NO_STEERING;
3605 device_config->rth_en = 0;
3606 }
3607
3608 /* configure bandwidth */
3609 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3610 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3611
3612 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3613 device_config->vp_config[i].vp_id = i;
3614 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3615 if (no_of_vpaths < driver_config->vpath_per_dev) {
3616 if (!vxge_bVALn(vpath_mask, i, 1)) {
3617 vxge_debug_ll_config(VXGE_TRACE,
3618 "%s: vpath: %d is not available",
3619 VXGE_DRIVER_NAME, i);
3620 continue;
3621 } else {
3622 vxge_debug_ll_config(VXGE_TRACE,
3623 "%s: vpath: %d available",
3624 VXGE_DRIVER_NAME, i);
3625 no_of_vpaths++;
3626 }
3627 } else {
3628 vxge_debug_ll_config(VXGE_TRACE,
3629 "%s: vpath: %d is not configured, "
3630 "max_config_vpath exceeded",
3631 VXGE_DRIVER_NAME, i);
3632 break;
3633 }
3634
3635 /* Configure Tx fifo's */
3636 device_config->vp_config[i].fifo.enable =
3637 VXGE_HW_FIFO_ENABLE;
3638 device_config->vp_config[i].fifo.max_frags =
3639 MAX_SKB_FRAGS;
3640 device_config->vp_config[i].fifo.memblock_size =
3641 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3642
3643 txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd);
3644 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3645
3646 device_config->vp_config[i].fifo.fifo_blocks =
3647 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3648
3649 device_config->vp_config[i].fifo.intr =
3650 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3651
3652 /* Configure tti properties */
3653 device_config->vp_config[i].tti.intr_enable =
3654 VXGE_HW_TIM_INTR_ENABLE;
3655
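		/* The tti/rti timer values below are configured in usec;
		 * the divisor 272 presumably converts them to the device's
		 * ~272 ns timer tick (val * 1000 ns / 272 ns-per-tick). */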
3656 device_config->vp_config[i].tti.btimer_val =
3657 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3658
3659 device_config->vp_config[i].tti.timer_ac_en =
3660 VXGE_HW_TIM_TIMER_AC_ENABLE;
3661
3662 /* For MSI-X with NAPI (each vector
3663 has a handler of its own) -
3664 set CI to OFF for all vpaths */
3665 device_config->vp_config[i].tti.timer_ci_en =
3666 VXGE_HW_TIM_TIMER_CI_DISABLE;
3667
3668 device_config->vp_config[i].tti.timer_ri_en =
3669 VXGE_HW_TIM_TIMER_RI_DISABLE;
3670
3671 device_config->vp_config[i].tti.util_sel =
3672 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3673
3674 device_config->vp_config[i].tti.ltimer_val =
3675 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3676
3677 device_config->vp_config[i].tti.rtimer_val =
3678 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3679
3680 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3681 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3682 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3683 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3684 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3685 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3686 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3687
3688 /* Configure Rx rings */
3689 device_config->vp_config[i].ring.enable =
3690 VXGE_HW_RING_ENABLE;
3691
3692 device_config->vp_config[i].ring.ring_blocks =
3693 VXGE_HW_DEF_RING_BLOCKS;
3694 device_config->vp_config[i].ring.buffer_mode =
3695 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3696 device_config->vp_config[i].ring.rxds_limit =
3697 VXGE_HW_DEF_RING_RXDS_LIMIT;
3698 device_config->vp_config[i].ring.scatter_mode =
3699 VXGE_HW_RING_SCATTER_MODE_A;
3700
3701 /* Configure rti properties */
3702 device_config->vp_config[i].rti.intr_enable =
3703 VXGE_HW_TIM_INTR_ENABLE;
3704
3705 device_config->vp_config[i].rti.btimer_val =
3706 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3707
3708 device_config->vp_config[i].rti.timer_ac_en =
3709 VXGE_HW_TIM_TIMER_AC_ENABLE;
3710
3711 device_config->vp_config[i].rti.timer_ci_en =
3712 VXGE_HW_TIM_TIMER_CI_DISABLE;
3713
3714 device_config->vp_config[i].rti.timer_ri_en =
3715 VXGE_HW_TIM_TIMER_RI_DISABLE;
3716
3717 device_config->vp_config[i].rti.util_sel =
3718 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3719
3720 device_config->vp_config[i].rti.urange_a =
3721 RTI_RX_URANGE_A;
3722 device_config->vp_config[i].rti.urange_b =
3723 RTI_RX_URANGE_B;
3724 device_config->vp_config[i].rti.urange_c =
3725 RTI_RX_URANGE_C;
3726 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3727 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3728 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3729 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3730
3731 device_config->vp_config[i].rti.rtimer_val =
3732 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3733
3734 device_config->vp_config[i].rti.ltimer_val =
3735 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3736
3737 device_config->vp_config[i].rpa_strip_vlan_tag =
3738 vlan_tag_strip;
3739 }
3740
3741 driver_config->vpath_per_dev = temp;
3742 return no_of_vpaths;
3743}
3744
3745/* initialize the device configuration */
3746static void __devinit vxge_device_config_init(
3747 struct vxge_hw_device_config *device_config,
3748 int *intr_type)
3749{
3750 /* Used for CQRQ/SRQ. */
3751 device_config->dma_blockpool_initial =
3752 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3753
3754 device_config->dma_blockpool_max =
3755 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3756
3757 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3758 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3759
3760#ifndef CONFIG_PCI_MSI
3761 vxge_debug_init(VXGE_ERR,
3762 "%s: This Kernel does not support "
3763 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3764 *intr_type = INTA;
3765#endif
3766
3767 /* Configure the interrupt mode: MSI-X or IRQ line (INTA). */
3768 switch (*intr_type) {
3769 case INTA:
3770 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3771 break;
3772
3773 case MSI_X:
3774 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
3775 break;
3776 }
3777 /* Timer period between device poll */
3778 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3779
3780 /* Configure mac based steering. */
3781 device_config->rts_mac_en = addr_learn_en;
3782
3783 /* Configure Vpaths */
3784 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3785
3786 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3787 __func__);
3788 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3789 device_config->dma_blockpool_initial);
3790 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3791 device_config->dma_blockpool_max);
3792 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3793 device_config->intr_mode);
3794 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3795 device_config->device_poll_millis);
3796 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3797 device_config->rts_mac_en);
3798 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3799 device_config->rth_en);
3800 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3801 device_config->rth_it_type);
3802}
3803
3804static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3805{
3806 int i;
3807
3808 vxge_debug_init(VXGE_TRACE,
3809 "%s: %d Vpath(s) opened",
3810 vdev->ndev->name, vdev->no_of_vpath);
3811
3812 switch (vdev->config.intr_type) {
3813 case INTA:
3814 vxge_debug_init(VXGE_TRACE,
3815 "%s: Interrupt type INTA", vdev->ndev->name);
3816 break;
3817
3818 case MSI_X:
3819 vxge_debug_init(VXGE_TRACE,
3820 "%s: Interrupt type MSI-X", vdev->ndev->name);
3821 break;
3822 }
3823
3824 if (vdev->config.rth_steering) {
3825 vxge_debug_init(VXGE_TRACE,
3826 "%s: RTH steering enabled for TCP_IPV4",
3827 vdev->ndev->name);
3828 } else {
3829 vxge_debug_init(VXGE_TRACE,
3830 "%s: RTH steering disabled", vdev->ndev->name);
3831 }
3832
3833 switch (vdev->config.tx_steering_type) {
3834 case NO_STEERING:
3835 vxge_debug_init(VXGE_TRACE,
3836 "%s: Tx steering disabled", vdev->ndev->name);
3837 break;
3838 case TX_PRIORITY_STEERING:
3839 vxge_debug_init(VXGE_TRACE,
3840 "%s: Unsupported tx steering option",
3841 vdev->ndev->name);
3842 vxge_debug_init(VXGE_TRACE,
3843 "%s: Tx steering disabled", vdev->ndev->name);
3844 vdev->config.tx_steering_type = 0;
3845 break;
3846 case TX_VLAN_STEERING:
3847 vxge_debug_init(VXGE_TRACE,
3848 "%s: Unsupported tx steering option",
3849 vdev->ndev->name);
3850 vxge_debug_init(VXGE_TRACE,
3851 "%s: Tx steering disabled", vdev->ndev->name);
3852 vdev->config.tx_steering_type = 0;
3853 break;
3854 case TX_MULTIQ_STEERING:
3855 vxge_debug_init(VXGE_TRACE,
3856 "%s: Tx multiqueue steering enabled",
3857 vdev->ndev->name);
3858 break;
3859 case TX_PORT_STEERING:
3860 vxge_debug_init(VXGE_TRACE,
3861 "%s: Tx port steering enabled",
3862 vdev->ndev->name);
3863 break;
3864 default:
3865 vxge_debug_init(VXGE_ERR,
3866 "%s: Unsupported tx steering type",
3867 vdev->ndev->name);
3868 vxge_debug_init(VXGE_TRACE,
3869 "%s: Tx steering disabled", vdev->ndev->name);
3870 vdev->config.tx_steering_type = 0;
3871 }
3872
3873 if (vdev->config.gro_enable)
3874  vxge_debug_init(VXGE_TRACE,
3875   "%s: Generic receive offload enabled",
3876   vdev->ndev->name);
3877 else
3878  vxge_debug_init(VXGE_TRACE,
3879   "%s: Generic receive offload disabled",
3880   vdev->ndev->name);
3881
3882 if (vdev->config.addr_learn_en)
3883 vxge_debug_init(VXGE_TRACE,
3884 "%s: MAC Address learning enabled", vdev->ndev->name);
3885
3886 vxge_debug_init(VXGE_TRACE,
3887 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3888
3889 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3890 if (!vxge_bVALn(vpath_mask, i, 1))
3891 continue;
3892 vxge_debug_ll_config(VXGE_TRACE,
3893 "%s: MTU size - %d", vdev->ndev->name,
3894 ((struct __vxge_hw_device *)(vdev->devh))->
3895 config.vp_config[i].mtu);
3896 vxge_debug_init(VXGE_TRACE,
3897 "%s: VLAN tag stripping %s", vdev->ndev->name,
3898 ((struct __vxge_hw_device *)(vdev->devh))->
3899 config.vp_config[i].rpa_strip_vlan_tag
3900 ? "Enabled" : "Disabled");
3901 vxge_debug_init(VXGE_TRACE,
3902 "%s: Ring blocks : %d", vdev->ndev->name,
3903 ((struct __vxge_hw_device *)(vdev->devh))->
3904 config.vp_config[i].ring.ring_blocks);
3905 vxge_debug_init(VXGE_TRACE,
3906 "%s: Fifo blocks : %d", vdev->ndev->name,
3907 ((struct __vxge_hw_device *)(vdev->devh))->
3908 config.vp_config[i].fifo.fifo_blocks);
3909 vxge_debug_ll_config(VXGE_TRACE,
3910 "%s: Max frags : %d", vdev->ndev->name,
3911 ((struct __vxge_hw_device *)(vdev->devh))->
3912 config.vp_config[i].fifo.max_frags);
3913  break; /* only the first configured vpath's settings are reported */
3914 }
3915}
3916
3917#ifdef CONFIG_PM
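/*
 * Power management is not implemented for this adapter; both handlers
 * return -ENOSYS, so any system suspend attempted while the driver is
 * bound will fail.
 */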
3918/**
3919 * vxge_pm_suspend - vxge power management suspend entry point
3920 *
3921 */
3922static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
3923{
3924 return -ENOSYS;
3925}
3926/**
3927 * vxge_pm_resume - vxge power management resume entry point
3928 *
3929 */
3930static int vxge_pm_resume(struct pci_dev *pdev)
3931{
3932 return -ENOSYS;
3933}
3934
3935#endif
3936
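/*
 * PCI error recovery (AER) flow, wired up via vxge_err_handler below:
 * error_detected() detaches the interface and disables the device,
 * slot_reset() re-enables it and schedules a device reset once the bus
 * has been reset, and resume() reopens the interface when the recovery
 * core says traffic may flow again.
 */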
3937/**
3938 * vxge_io_error_detected - called when PCI error is detected
3939 * @pdev: Pointer to PCI device
3940 * @state: The current pci connection state
3941 *
3942 * This function is called after a PCI bus error affecting
3943 * this device has been detected.
3944 */
3945static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3946 pci_channel_state_t state)
3947{
3948 struct __vxge_hw_device *hldev =
3949 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3950 struct net_device *netdev = hldev->ndev;
3951
3952 netif_device_detach(netdev);
3953
3954 if (netif_running(netdev)) {
3955 /* Bring down the card, while avoiding PCI I/O */
3956 do_vxge_close(netdev, 0);
3957 }
3958
3959 pci_disable_device(pdev);
3960
3961 return PCI_ERS_RESULT_NEED_RESET;
3962}
3963
3964/**
3965 * vxge_io_slot_reset - called after the pci bus has been reset.
3966 * @pdev: Pointer to PCI device
3967 *
3968 * Restart the card from scratch, as if from a cold-boot.
3969 * At this point, the card has experienced a hard reset,
3970 * followed by fixups by BIOS, and has its config space
3971 * set up identically to what it was at cold boot.
3972 */
3973static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3974{
3975 struct __vxge_hw_device *hldev =
3976 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3977 struct net_device *netdev = hldev->ndev;
3978
3979 struct vxgedev *vdev = netdev_priv(netdev);
3980
3981 if (pci_enable_device(pdev)) {
3982 printk(KERN_ERR "%s: "
3983 "Cannot re-enable device after reset\n",
3984 VXGE_DRIVER_NAME);
3985 return PCI_ERS_RESULT_DISCONNECT;
3986 }
3987
3988 pci_set_master(pdev);
3989 vxge_reset(vdev);
3990
3991 return PCI_ERS_RESULT_RECOVERED;
3992}
3993
3994/**
3995 * vxge_io_resume - called when traffic can start flowing again.
3996 * @pdev: Pointer to PCI device
3997 *
3998 * This callback is called when the error recovery driver tells
3999 * us that it's OK to resume normal operation.
4000 */
4001static void vxge_io_resume(struct pci_dev *pdev)
4002{
4003 struct __vxge_hw_device *hldev =
4004 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4005 struct net_device *netdev = hldev->ndev;
4006
4007 if (netif_running(netdev)) {
4008 if (vxge_open(netdev)) {
4009 printk(KERN_ERR "%s: "
4010 "Can't bring device back up after reset\n",
4011 VXGE_DRIVER_NAME);
4012 return;
4013 }
4014 }
4015
4016 netif_device_attach(netdev);
4017}
4018
4019/**
4020 * vxge_probe
4021 * @pdev : structure containing the PCI related information of the device.
4022 * @pre: the entry in vxge_id_table that matched this device.
4023 * Description:
4024 * This function is called when a new PCI device gets detected and initializes
4025 * it.
4026 * Return value:
4027 * returns 0 on success and negative on failure.
4028 *
4029 */
4030static int __devinit
4031vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4032{
4033 struct __vxge_hw_device *hldev;
4034 enum vxge_hw_status status;
4035 int ret;
4036 int high_dma = 0;
4037 u64 vpath_mask = 0;
4038 struct vxgedev *vdev;
4039 struct vxge_config ll_config;
4040 struct vxge_hw_device_config *device_config = NULL;
4041 struct vxge_hw_device_attr attr;
4042 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4043 u8 *macaddr;
4044 struct vxge_mac_addrs *entry;
4045 static int bus = -1, device = -1;
4046 u8 new_device = 0;
4047
4048 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4049 attr.pdev = pdev;
4050
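 /*
  * The static bus/slot pair remembers the last adapter probed; a change
  * in either means this call is for a new physical device rather than
  * another function of the same adapter, so the per-device counters
  * below are restarted.
  */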
4051 if (bus != pdev->bus->number)
4052 new_device = 1;
4053 if (device != PCI_SLOT(pdev->devfn))
4054 new_device = 1;
4055
4056 bus = pdev->bus->number;
4057 device = PCI_SLOT(pdev->devfn);
4058
4059 if (new_device) {
4060 if (driver_config->config_dev_cnt &&
4061 (driver_config->config_dev_cnt !=
4062 driver_config->total_dev_cnt))
4063 vxge_debug_init(VXGE_ERR,
4064 "%s: Configured %d of %d devices",
4065 VXGE_DRIVER_NAME,
4066 driver_config->config_dev_cnt,
4067 driver_config->total_dev_cnt);
4068 driver_config->config_dev_cnt = 0;
4069 driver_config->total_dev_cnt = 0;
4070 driver_config->g_no_cpus = 0;
4071 driver_config->vpath_per_dev = max_config_vpath;
4072 }
4073
4074 driver_config->total_dev_cnt++;
4075 if (++driver_config->config_dev_cnt > max_config_dev) {
4076 ret = 0;
4077 goto _exit0;
4078 }
4079
4080 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4081 GFP_KERNEL);
4082 if (!device_config) {
4083 ret = -ENOMEM;
4084 vxge_debug_init(VXGE_ERR,
4085 "device_config : malloc failed %s %d",
4086 __FILE__, __LINE__);
4087 goto _exit0;
4088 }
4089
4090 memset(&ll_config, 0, sizeof(struct vxge_config));
4091 ll_config.tx_steering_type = TX_MULTIQ_STEERING;
4092 ll_config.intr_type = MSI_X;
4093 ll_config.napi_weight = NEW_NAPI_WEIGHT;
4094 ll_config.rth_steering = RTH_STEERING;
4095
4096 /* get the default configuration parameters */
4097 vxge_hw_device_config_default_get(device_config);
4098
4099 /* initialize configuration parameters */
4100 vxge_device_config_init(device_config, &ll_config.intr_type);
4101
4102 ret = pci_enable_device(pdev);
4103 if (ret) {
4104 vxge_debug_init(VXGE_ERR,
4105 "%s : can not enable PCI device", __func__);
4106 goto _exit0;
4107 }
4108
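 /*
  * Prefer full 64-bit DMA addressing (both streaming and consistent
  * masks); otherwise fall back to a 32-bit mask, and give up if even
  * that cannot be set.
  */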
4109 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
4110 vxge_debug_ll_config(VXGE_TRACE,
4111 "%s : using 64bit DMA", __func__);
4112
4113 high_dma = 1;
4114
4115 if (pci_set_consistent_dma_mask(pdev,
4116 0xffffffffffffffffULL)) {
4117 vxge_debug_init(VXGE_ERR,
4118 "%s : unable to obtain 64bit DMA for "
4119 "consistent allocations", __func__);
4120 ret = -ENOMEM;
4121 goto _exit1;
4122 }
4123 } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
4124 vxge_debug_ll_config(VXGE_TRACE,
4125 "%s : using 32bit DMA", __func__);
4126 } else {
4127 ret = -ENOMEM;
4128 goto _exit1;
4129 }
4130
4131 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
4132 vxge_debug_init(VXGE_ERR,
4133 "%s : request regions failed", __func__);
4134 ret = -ENODEV;
4135 goto _exit1;
4136 }
4137
4138 pci_set_master(pdev);
4139
4140 attr.bar0 = pci_ioremap_bar(pdev, 0);
4141 if (!attr.bar0) {
4142 vxge_debug_init(VXGE_ERR,
4143 "%s : cannot remap io memory bar0", __func__);
4144 ret = -ENODEV;
4145 goto _exit2;
4146 }
4147 vxge_debug_ll_config(VXGE_TRACE,
4148 "pci ioremap bar0: %p:0x%llx",
4149 attr.bar0,
4150 (unsigned long long)pci_resource_start(pdev, 0));
4151
4152 attr.bar1 = pci_ioremap_bar(pdev, 2);
4153 if (!attr.bar1) {
4154 vxge_debug_init(VXGE_ERR,
4155 "%s : cannot remap io memory bar2", __func__);
4156 ret = -ENODEV;
4157 goto _exit3;
4158 }
4159 vxge_debug_ll_config(VXGE_TRACE,
4160 "pci ioremap bar1: %p:0x%llx",
4161 attr.bar1,
4162 (unsigned long long)pci_resource_start(pdev, 2));
4163
4164 status = vxge_hw_device_hw_info_get(attr.bar0,
4165 &ll_config.device_hw_info);
4166 if (status != VXGE_HW_OK) {
4167 vxge_debug_init(VXGE_ERR,
4168   "%s: Reading of hardware info failed. "
4169 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4170 ret = -EINVAL;
4171 goto _exit4;
4172 }
4173
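 /*
  * The firmware's major version must match the driver's; a mismatch is
  * treated as fatal for this device.
  */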
4174 if (ll_config.device_hw_info.fw_version.major !=
4175 VXGE_DRIVER_VERSION_MAJOR) {
4176 vxge_debug_init(VXGE_ERR,
4177 "FW Ver.(maj): %d not driver's expected version: %d",
4178 ll_config.device_hw_info.fw_version.major,
4179 VXGE_DRIVER_VERSION_MAJOR);
4180 ret = -EINVAL;
4181 goto _exit4;
4182 }
4183
4184 vpath_mask = ll_config.device_hw_info.vpath_mask;
4185 if (vpath_mask == 0) {
4186 vxge_debug_ll_config(VXGE_TRACE,
4187 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4188 ret = -EINVAL;
4189 goto _exit4;
4190 }
4191
4192 vxge_debug_ll_config(VXGE_TRACE,
4193 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4194 (unsigned long long)vpath_mask);
4195
4196 /* Check how many vpaths are available */
4197 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4198 if (!((vpath_mask) & vxge_mBIT(i)))
4199 continue;
4200 max_vpath_supported++;
4201 }
4202
4203 /*
4204 * Configure vpaths and get driver configured number of vpaths
4205 * which is less than or equal to the maximum vpaths per function.
4206 */
4207 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
4208 if (!no_of_vpath) {
4209 vxge_debug_ll_config(VXGE_ERR,
4210 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4211 ret = 0;
4212 goto _exit4;
4213 }
4214
4215 /* Setting driver callbacks */
4216 attr.uld_callbacks.link_up = vxge_callback_link_up;
4217 attr.uld_callbacks.link_down = vxge_callback_link_down;
4218 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4219
4220 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4221 if (status != VXGE_HW_OK) {
4222 vxge_debug_init(VXGE_ERR,
4223 "Failed to initialize device (%d)", status);
4224 ret = -EINVAL;
4225 goto _exit4;
4226 }
4227
4228 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4229
4230 /* set private device info */
4231 pci_set_drvdata(pdev, hldev);
4232
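 /*
  * Default receive-side steering: Jenkins hash computed over TCP/IPv4
  * headers only; every other RTH hash type is left disabled here.
  */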
4233 ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4234 ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4235 ll_config.addr_learn_en = addr_learn_en;
4236 ll_config.rth_algorithm = RTH_ALG_JENKINS;
4237 ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
4238 ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
4239 ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4240 ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4241 ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4242 ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4243 ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
4244 ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4245 ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4246
4247 if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
4248 &vdev)) {
4249 ret = -EINVAL;
4250 goto _exit5;
4251 }
4252
4253 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4254 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4255 vxge_hw_device_trace_level_get(hldev));
4256
4257 /* set private HW device info */
4258 hldev->ndev = vdev->ndev;
4259 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4260 vdev->bar0 = attr.bar0;
4261 vdev->bar1 = attr.bar1;
4262 vdev->max_vpath_supported = max_vpath_supported;
4263 vdev->no_of_vpath = no_of_vpath;
4264
4265 /* Virtual Path count */
4266 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4267 if (!vxge_bVALn(vpath_mask, i, 1))
4268 continue;
4269 if (j >= vdev->no_of_vpath)
4270 break;
4271
4272 vdev->vpaths[j].is_configured = 1;
4273 vdev->vpaths[j].device_id = i;
4274 vdev->vpaths[j].fifo.driver_id = j;
4275 vdev->vpaths[j].ring.driver_id = j;
4276 vdev->vpaths[j].vdev = vdev;
4277 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4278 memcpy((u8 *)vdev->vpaths[j].macaddr,
4279 (u8 *)ll_config.device_hw_info.mac_addrs[i],
4280 ETH_ALEN);
4281
4282 /* Initialize the mac address list header */
4283 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4284
4285 vdev->vpaths[j].mac_addr_cnt = 0;
4286 vdev->vpaths[j].mcast_addr_cnt = 0;
4287 j++;
4288 }
4289 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4290 vdev->max_config_port = max_config_port;
4291
4292 vdev->vlan_tag_strip = vlan_tag_strip;
4293
4294 /* map the hashing selector table to the configured vpaths */
4295 for (i = 0; i < vdev->no_of_vpath; i++)
4296 vdev->vpath_selector[i] = vpath_selector[i];
4297
4298 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4299
4300 ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4301 ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4302 ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4303
4304 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4305 vdev->ndev->name, ll_config.device_hw_info.serial_number);
4306
4307 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4308 vdev->ndev->name, ll_config.device_hw_info.part_number);
4309
4310 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4311 vdev->ndev->name, ll_config.device_hw_info.product_desc);
4312
4313 vxge_debug_init(VXGE_TRACE,
4314 "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
4315 vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
4316 macaddr[3], macaddr[4], macaddr[5]);
4317
4318 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4319 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4320
4321 vxge_debug_init(VXGE_TRACE,
4322 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4323 ll_config.device_hw_info.fw_version.version,
4324 ll_config.device_hw_info.fw_date.date);
4325
4326 vxge_print_parm(vdev, vpath_mask);
4327
4328 /* Store the fw version for the ethtool option */
4329 strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
4330 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4331 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4332
4333 /* Copy the station mac address to the list */
4334 for (i = 0; i < vdev->no_of_vpath; i++) {
4335  entry = kzalloc(sizeof(struct vxge_mac_addrs),
4336    GFP_KERNEL);
4337
4338  if (!entry) {
4339   vxge_debug_init(VXGE_ERR,
4340    "%s: mac_addr_list : memory allocation failed",
4341    vdev->ndev->name);
4342   ret = -ENOMEM;
4343   goto _exit6;
4344  }
4345 macaddr = (u8 *)&entry->macaddr;
4346 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4347 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4348 vdev->vpaths[i].mac_addr_cnt = 1;
4349 }
4350
4351 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4352 vdev->ndev->name, __func__, __LINE__);
4353
4354 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4355 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4356 vxge_hw_device_trace_level_get(hldev));
4357
4358 return 0;
4359
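/*
 * Error unwind: each label below releases what was acquired after the
 * previous label's resource, in reverse order of acquisition.
 */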
4360_exit6:
4361 for (i = 0; i < vdev->no_of_vpath; i++)
4362 vxge_free_mac_add_list(&vdev->vpaths[i]);
4363
4364 vxge_device_unregister(hldev);
4365_exit5:
4366 vxge_hw_device_terminate(hldev);
4367_exit4:
4368 iounmap(attr.bar1);
4369_exit3:
4370 iounmap(attr.bar0);
4371_exit2:
4372 pci_release_regions(pdev);
4373_exit1:
4374 pci_disable_device(pdev);
4375_exit0:
4376 kfree(device_config);
4377 driver_config->config_dev_cnt--;
4378 pci_set_drvdata(pdev, NULL);
4379 return ret;
4380}
4381
4382/**
4383 * vxge_remove - Free the PCI device
4384 * @pdev: structure containing the PCI related information of the device.
4385 * Description: This function is called by the PCI subsystem to release a
4386 * PCI device and free all resources held by the device.
4387 */
4388static void __devexit
4389vxge_remove(struct pci_dev *pdev)
4390{
4391 struct __vxge_hw_device *hldev;
4392 struct vxgedev *vdev = NULL;
4393 struct net_device *dev;
4394 int i = 0;
4395#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4396 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4397 u32 level_trace;
4398#endif
4399
4400 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4401
4402 if (hldev == NULL)
4403 return;
4404 dev = hldev->ndev;
4405 vdev = netdev_priv(dev);
4406
4407#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4408 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4409 level_trace = vdev->level_trace;
4410#endif
4411 vxge_debug_entryexit(level_trace,
4412 "%s:%d", __func__, __LINE__);
4413
4414 vxge_debug_init(level_trace,
4415 "%s : removing PCI device...", __func__);
4416 vxge_device_unregister(hldev);
4417
4418 for (i = 0; i < vdev->no_of_vpath; i++) {
4419 vxge_free_mac_add_list(&vdev->vpaths[i]);
4420 vdev->vpaths[i].mcast_addr_cnt = 0;
4421 vdev->vpaths[i].mac_addr_cnt = 0;
4422 }
4423
4424 kfree(vdev->vpaths);
4425
4426 iounmap(vdev->bar0);
4427 iounmap(vdev->bar1);
4428
4429 /* we are safe to free it now */
4430 free_netdev(dev);
4431
4432 vxge_debug_init(level_trace,
4433 "%s:%d Device unregistered", __func__, __LINE__);
4434
4435 vxge_hw_device_terminate(hldev);
4436
4437 pci_disable_device(pdev);
4438 pci_release_regions(pdev);
4439 pci_set_drvdata(pdev, NULL);
4440 vxge_debug_entryexit(level_trace,
4441 "%s:%d Exiting...", __func__, __LINE__);
4442}
4443
4444static struct pci_error_handlers vxge_err_handler = {
4445 .error_detected = vxge_io_error_detected,
4446 .slot_reset = vxge_io_slot_reset,
4447 .resume = vxge_io_resume,
4448};
4449
4450static struct pci_driver vxge_driver = {
4451 .name = VXGE_DRIVER_NAME,
4452 .id_table = vxge_id_table,
4453 .probe = vxge_probe,
4454 .remove = __devexit_p(vxge_remove),
4455#ifdef CONFIG_PM
4456 .suspend = vxge_pm_suspend,
4457 .resume = vxge_pm_resume,
4458#endif
4459 .err_handler = &vxge_err_handler,
4460};
4461
4462static int __init
4463vxge_starter(void)
4464{
4465 int ret = 0;
4466 char version[32];
4467 snprintf(version, 32, "%s", DRV_VERSION);
4468
4469 printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n",
4470  VXGE_DRIVER_NAME);
4471 printk(KERN_INFO "%s: Driver version: %s\n",
4472  VXGE_DRIVER_NAME, version);
4473
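 /*
  * verify_bandwidth() is defined earlier in this file; it is expected
  * to validate the per-vpath bw_percentage module parameters before
  * any device is probed.
  */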
4474 verify_bandwidth();
4475
4476 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4477 if (!driver_config)
4478 return -ENOMEM;
4479
4480 ret = pci_register_driver(&vxge_driver);
4481
4482 if (driver_config->config_dev_cnt &&
4483 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4484 vxge_debug_init(VXGE_ERR,
4485 "%s: Configured %d of %d devices",
4486 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4487 driver_config->total_dev_cnt);
4488
4489 if (ret)
4490 kfree(driver_config);
4491
4492 return ret;
4493}
4494
4495static void __exit
4496vxge_closer(void)
4497{
4498 pci_unregister_driver(&vxge_driver);
4499 kfree(driver_config);
4500}
4501module_init(vxge_starter);
4502module_exit(vxge_closer);