/******************************************************************************
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
* Drivers based on or derived from this code fall under the GPL and must
* retain the authorship, copyright and license notice. This file is not
* a complete program and may only be used when the entire operating
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
*
* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
*              Virtualized Server Adapter.
* Copyright(c) 2002-2010 Exar Corp.
*
* The module loadable parameters that are supported by the driver and a brief
* explanation of all the variables:
*
* vlan_tag_strip:
*	Strip VLAN tag enable/disable. Instructs the device to remove
*	the VLAN tag from all received tagged frames that are not
*	replicated at the internal L2 switch.
*		0 - Do not strip the VLAN tag.
*		1 - Strip the VLAN tag.
*
* addr_learn_en:
*	Enable learning the MAC address of the guest OS interface in
*	a virtualization environment.
*
* max_config_port:
*	Maximum number of ports to be supported.
*
* max_config_vpath:
*	This configures the maximum number of VPATHs configured for each
*	device function.
*		MIN - 1 and MAX - 17
*
* max_config_dev:
*	This configures the maximum number of device functions to be enabled.
*		MIN - 1 and MAX - 17
*
******************************************************************************/
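/*
 * Editor's note, illustrative usage (not part of the original source):
 * the parameters above are set at module load time, e.g.
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_vpath=4
 *
 * The values shown are hypothetical; the actual defaults come from the
 * VXGE_MODULE_PARAM_INT() declarations below.
 */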
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "vxge-main.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");
static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);
VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
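/*
 * Editor's note (not in the original source): each entry above is the
 * smallest power-of-two-minus-one mask covering the vpath count, so a
 * flow hash can be folded into a vpath index with a single AND, e.g.
 * with 4 vpaths vpath_selector[3] == 3 and (hash & 3) picks the path.
 */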
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
	struct macInfo *mac);
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
	struct macInfo *mac);
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}
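/*
 * Editor's note (not in the original source): the completed SKBs are
 * freed with dev_kfree_skb_irq() only after the tx queue lock has been
 * dropped, keeping the locked section as short as possible.
 */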
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}
static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}
/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
static void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
static void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}
/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}
/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}
static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(ring->napi_p, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(ring->napi_p, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((dtr_count % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}
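/*
 * Editor's note (not in the original source): descriptors are posted in
 * batches of VXGE_HW_RXSYNC_FREQ_CNT; only the batch boundary issues the
 * post variant with a write memory barrier, which amortizes the barrier
 * cost across the whole batch of RxDs.
 */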
/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {
				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), drop it.
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		/* rth_hash_type and rth_it_hit are non-zero regardless of
		 * whether rss is enabled. Only the rth_value is zero/non-zero
		 * if rss is disabled/enabled, so key off of that.
		 */
		if (ext_info.rth_value)
			skb->rxhash = ext_info.rth_value;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
	return VXGE_HW_OK;
}
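/*
 * Editor's note (not in the original source): receive completion above is
 * a classic copy-break scheme. Frames larger than VXGE_LL_RX_COPY_THRESHOLD
 * keep the DMA-mapped SKB and a fresh buffer is posted in its place; small
 * frames are copied into a new SKB so the original buffer can be re-posted
 * without an unmap/remap cycle.
 */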
/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NIC's
 * internal memory.
 */
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
				txd_priv->dma_buffers[i++],
				frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
		&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}
/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}
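/*
 * Editor's note, worked example (not in the original source): with four
 * vpaths and a TCP flow from source port 32768 to destination port 80,
 * (32768 + 80) & vpath_selector[3] == 32848 & 3 == 0, so the flow is
 * pinned to vpath 0; every segment of one flow hashes identically.
 */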
static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(
				struct vxge_hw_mrpcim_reg,
				rts_mgr_cbasin_cfg),
			vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}
/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;

	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
			dev->name, __func__, __LINE__, i,
			(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
				VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
				VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
				VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);

	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
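/*
 * Editor's note (not in the original source): on any failure past TXD
 * reservation the queue is stopped and the SKB consumed, yet NETDEV_TX_OK
 * is still returned because the packet has been dealt with;
 * NETDEV_TX_BUSY is reserved for the "queue already stopped" case.
 */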
/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}
/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine if multicast addresses must be enabled or if promiscuous mode
 * is to be disabled, etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags&IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
							vdev, &mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0])
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}
/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id = 0;
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = VXGE_ALARM_MSIX_ID;

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
	}
}
/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	struct __vxge_hw_device *hldev;
	int msix_id;

	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}
/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vpath->handle) {
		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(vpath->handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(vpath);
	vxge_restore_vpath_vid_table(vpath);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vpath->handle);

	/* Enable all multicast */
	if (vdev->all_multi_flg) {
		status = vxge_hw_vpath_mcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s:%d Enabling multicast failed",
				__func__, __LINE__);
	}

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vpath->handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	vpath->ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	if (netif_tx_queue_stopped(vpath->fifo.txq))
		netif_tx_wake_queue(vpath->fifo.txq);

	return ret;
}
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			netif_tx_stop_all_queues(vdev->ndev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_wait_receive_idle(vdev->devh);
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		netif_tx_stop_all_queues(vdev->ndev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath: %d not open", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		netif_tx_wake_all_queues(vdev->ndev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}
/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
static int vxge_reset(struct vxgedev *vdev)
{
	return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
			(struct __vxge_hw_channel *)ring->handle,
			ring->rx_vector_no);
	}

	return ring->pkts_processed;
}
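/*
 * Editor's note (not in the original source): per standard NAPI semantics,
 * napi_complete() and interrupt re-enable happen only when fewer packets
 * than the budget were processed; otherwise the kernel keeps polling and
 * the vector stays masked.
 */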
static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 *
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif
/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}
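/*
 * Editor's note, worked example (not in the original source): with
 * rth_bkt_sz = 2 (four buckets) and two vpaths, the fill loop above
 * produces itable = {0, 1, 2, 3} and mtable = {0, 1, 0, 1}, i.e. the
 * hash buckets alternate between the two vpaths.
 */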
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}
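/*
 * Editor's note (not in the original source): testing bit 0 of the first
 * address octet is the standard check for the Ethernet group (multicast)
 * bit, which is the least significant bit of the first byte on the wire.
 */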
/* Add a mac address to DA table */
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
					     struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}
/* delete a mac address from DA table */
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
					     struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}
/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
				 struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}
/* Store all vlan ids from the list to the vid table */
static enum vxge_hw_status
vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (vdev->vlgrp && vpath->is_open) {
		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(vdev->vlgrp, vid))
				continue;
			/* Add these vlan to the vid table */
			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
		}
	}

	return status;
}
/* Store all mac addresses from the list to the DA table */
static enum vxge_hw_status
vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {
		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
				    VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
					    "DA add entry failed for vpath:%d",
					    vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}
/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		if (vpath->handle) {
			if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vpath->handle) != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				return VXGE_HW_FAIL;
			}
		}
	}

	return status;
}
/* close vpaths */
static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	struct vxge_vpath *vpath;
	int i;

	for (i = index; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		if (vpath->handle && vpath->is_open) {
			vxge_hw_vpath_close(vpath->handle);
			vdev->stats.vpaths_open--;
		}
		vpath->is_open = 0;
		vpath->handle = NULL;
	}
}
/* open vpaths */
static int vxge_open_vpaths(struct vxgedev *vdev)
{
	struct vxge_hw_vpath_attr attr;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath;
	u32 vp_id = 0;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		vxge_assert(vpath->is_configured);
		attr.vp_id = vpath->device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = &vpath->fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = &vpath->ring;

		vpath->ring.ndev = vdev->ndev;
		vpath->ring.pdev = vdev->pdev;
		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
		if (status == VXGE_HW_OK) {
			vpath->fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vpath->ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vpath->fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vpath->fifo.ndev = vdev->ndev;
			vpath->fifo.pdev = vdev->pdev;
			if (vdev->config.tx_steering_type)
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, i);
			else
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, 0);
			vpath->fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_csum = vdev->rx_csum;
			vpath->is_open = 1;
			vdev->vp_handles[i] = vpath->handle;
			vpath->ring.gro_enable = vdev->config.gro_enable;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR,
				"%s: vpath: %d failed to open "
				"with status: %d",
				vdev->ndev->name, vpath->device_id,
				status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id = vpath->handle->vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}
	return VXGE_HW_OK;
}
/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct net_device *dev;
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *)dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = vdev->ndev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_NONE;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
			&reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}
#ifdef CONFIG_PCI_MSI

static irqreturn_t
vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	VXGE_COMPLETE_VPATH_TX(fifo);

	return IRQ_HANDLED;
}

static irqreturn_t
vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	/* MSIX_IDX for Rx is 1 */
	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
					ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}
static irqreturn_t
vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int msix_id = (vpath->handle->vpath->vp_id *
		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {
			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
					msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}
static int vxge_alloc_msix(struct vxgedev *vdev)
{
	int j, i, ret = 0;
	int msix_intr_vect = 0, temp;
	vdev->intr_cnt = 0;

start:
	/* Tx/Rx MSIX Vectors count */
	vdev->intr_cnt = vdev->no_of_vpath * 2;

	/* Alarm MSIX Vectors count */
	vdev->intr_cnt++;
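	/*
	 * Editor's note (not in the original source): the vector layout is
	 * two vectors per vpath (one Tx fifo, one Rx ring) plus a single
	 * shared alarm vector, so intr_cnt = no_of_vpath * 2 + 1.
	 */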
	vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
				GFP_KERNEL);
	if (!vdev->entries) {
		vxge_debug_init(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_entries_failed;
	}

	vdev->vxge_entries = kcalloc(vdev->intr_cnt,
				     sizeof(struct vxge_msix_entry),
				     GFP_KERNEL);
	if (!vdev->vxge_entries) {
		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_vxge_entries_failed;
	}

	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {

		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* Initialize the fifo vector */
		vdev->entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].in_use = 0;
		j++;

		/* Initialize the ring vector */
		vdev->entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].in_use = 0;
		j++;
	}

	/* Initialize the alarm vector */
	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].in_use = 0;

	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
	if (ret > 0) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, ret: %d",
			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
		if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
			ret = -ENODEV;
			goto enable_msix_failed;
		}

		kfree(vdev->entries);
		kfree(vdev->vxge_entries);
		vdev->entries = NULL;
		vdev->vxge_entries = NULL;
		/* Try with fewer vectors by reducing the vpath count */
		temp = (ret - 1) / 2;
		vxge_close_vpaths(vdev, temp);
		vdev->no_of_vpath = temp;
		goto start;
	} else if (ret < 0) {
		ret = -ENODEV;
		goto enable_msix_failed;
	}
	return 0;

enable_msix_failed:
	kfree(vdev->vxge_entries);
alloc_vxge_entries_failed:
	kfree(vdev->entries);
alloc_entries_failed:
	return ret;
}
2261 static int vxge_enable_msix(struct vxgedev *vdev)
2265 /* 0 - Tx, 1 - Rx */
2266 int tim_msix_id[4] = {0, 1, 0, 0};
2270 /* allocate msix vectors */
2271 ret = vxge_alloc_msix(vdev);
2272 if (!ret) {
2273 for (i = 0; i < vdev->no_of_vpath; i++) {
2274 struct vxge_vpath *vpath = &vdev->vpaths[i];
2276 /* If fifo or ring are not enabled, the MSIX vector for
2277 * it should be set to 0.
2278 */
2279 vpath->ring.rx_vector_no = (vpath->device_id *
2280 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2282 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2283 VXGE_ALARM_MSIX_ID);
2284 }
2285 }
2287 return ret;
2288 }
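/*
 * tim_msix_id[] maps the TIM interrupt types of a vpath onto its MSI-X
 * slots: index 0 (Tx) -> slot 0 and index 1 (Rx) -> slot 1, per the
 * "0 - Tx, 1 - Rx" note above (the remaining entries are left zero, which
 * is assumed here to mean "unused"). E.g. vpath 2 gets its Rx NAPI vector
 * at 2 * VXGE_HW_VPATH_MSIX_ACTIVE + 1, matching rx_vector_no above.
 */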
2290 static void vxge_rem_msix_isr(struct vxgedev *vdev)
2291 {
2292 int intr_cnt;
2294 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2295 intr_cnt++) {
2296 if (vdev->vxge_entries[intr_cnt].in_use) {
2297 synchronize_irq(vdev->entries[intr_cnt].vector);
2298 free_irq(vdev->entries[intr_cnt].vector,
2299 vdev->vxge_entries[intr_cnt].arg);
2300 vdev->vxge_entries[intr_cnt].in_use = 0;
2301 }
2302 }
2304 kfree(vdev->entries);
2305 kfree(vdev->vxge_entries);
2306 vdev->entries = NULL;
2307 vdev->vxge_entries = NULL;
2309 if (vdev->config.intr_type == MSI_X)
2310 pci_disable_msix(vdev->pdev);
2311 }
2312 #endif
2314 static void vxge_rem_isr(struct vxgedev *vdev)
2315 {
2316 struct __vxge_hw_device *hldev;
2317 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2319 #ifdef CONFIG_PCI_MSI
2320 if (vdev->config.intr_type == MSI_X) {
2321 vxge_rem_msix_isr(vdev);
2322 } else
2323 #endif
2324 if (vdev->config.intr_type == INTA) {
2325 synchronize_irq(vdev->pdev->irq);
2326 free_irq(vdev->pdev->irq, vdev);
2327 }
2328 }
2330 static int vxge_add_isr(struct vxgedev *vdev)
2331 {
2332 int ret = 0;
2333 #ifdef CONFIG_PCI_MSI
2334 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2335 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2337 if (vdev->config.intr_type == MSI_X)
2338 ret = vxge_enable_msix(vdev);
2340 if (ret) {
2341 vxge_debug_init(VXGE_ERR,
2342 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2343 vxge_debug_init(VXGE_ERR,
2344 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2345 vdev->config.intr_type = INTA;
2346 }
2348 if (vdev->config.intr_type == MSI_X) {
2349 for (intr_idx = 0;
2350 intr_idx < (vdev->no_of_vpath *
2351 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2353 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2354 switch (msix_idx) {
2355 case 0:
2358 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2359 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2360 vdev->ndev->name,
2361 vdev->entries[intr_cnt].entry,
2362 pci_fun, vp_idx);
2363 ret = request_irq(
2364 vdev->entries[intr_cnt].vector,
2365 vxge_tx_msix_handle, 0,
2366 vdev->desc[intr_cnt],
2367 &vdev->vpaths[vp_idx].fifo);
2368 vdev->vxge_entries[intr_cnt].arg =
2369 &vdev->vpaths[vp_idx].fifo;
2370 irq_req = 1;
2371 break;
2372 case 1:
2373 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2374 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2375 vdev->ndev->name,
2376 vdev->entries[intr_cnt].entry,
2377 pci_fun, vp_idx);
2378 ret = request_irq(
2379 vdev->entries[intr_cnt].vector,
2380 vxge_rx_msix_napi_handle,
2381 0,
2382 vdev->desc[intr_cnt],
2383 &vdev->vpaths[vp_idx].ring);
2384 vdev->vxge_entries[intr_cnt].arg =
2385 &vdev->vpaths[vp_idx].ring;
2386 irq_req = 1;
2387 break;
2388 }
2390 if (ret) {
2391 vxge_debug_init(VXGE_ERR,
2392 "%s: MSIX - %d Registration failed",
2393 vdev->ndev->name, intr_cnt);
2394 vxge_rem_msix_isr(vdev);
2395 vdev->config.intr_type = INTA;
2396 vxge_debug_init(VXGE_ERR,
2397 "%s: Defaulting to INTA",
2398 vdev->ndev->name);
2399 goto INTA_MODE;
2400 }
2403 /* We requested this MSI-X interrupt */
2404 vdev->vxge_entries[intr_cnt].in_use = 1;
2405 msix_idx += vdev->vpaths[vp_idx].device_id *
2406 VXGE_HW_VPATH_MSIX_ACTIVE;
2407 vxge_hw_vpath_msix_unmask(
2408 vdev->vpaths[vp_idx].handle,
2409 msix_idx);
2413 /* Point to next vpath handler */
2414 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2415 (vp_idx < (vdev->no_of_vpath - 1)))
2416 vp_idx++;
2417 }
2419 intr_cnt = vdev->no_of_vpath * 2;
2420 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2421 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2422 vdev->ndev->name,
2423 vdev->entries[intr_cnt].entry,
2424 pci_fun);
2425 /* For Alarm interrupts */
2426 ret = request_irq(vdev->entries[intr_cnt].vector,
2427 vxge_alarm_msix_handle, 0,
2428 vdev->desc[intr_cnt],
2429 &vdev->vpaths[0]);
2430 if (ret) {
2431 vxge_debug_init(VXGE_ERR,
2432 "%s: MSIX - %d Registration failed",
2433 vdev->ndev->name, intr_cnt);
2434 vxge_rem_msix_isr(vdev);
2435 vdev->config.intr_type = INTA;
2436 vxge_debug_init(VXGE_ERR,
2437 "%s: Defaulting to INTA",
2438 vdev->ndev->name);
2439 goto INTA_MODE;
2440 }
2442 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2443 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2444 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2445 msix_idx);
2446 vdev->vxge_entries[intr_cnt].in_use = 1;
2447 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2448 }
2449 INTA_MODE:
2450 #endif
2452 if (vdev->config.intr_type == INTA) {
2453 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2454 "%s:vxge:INTA", vdev->ndev->name);
2455 vxge_hw_device_set_intr_type(vdev->devh,
2456 VXGE_HW_INTR_MODE_IRQLINE);
2457 vxge_hw_vpath_tti_ci_set(vdev->devh,
2458 vdev->vpaths[0].device_id);
2459 ret = request_irq((int) vdev->pdev->irq,
2460 vxge_isr_napi,
2461 IRQF_SHARED, vdev->desc[0], vdev);
2462 if (ret) {
2463 vxge_debug_init(VXGE_ERR,
2464 "%s %s-%d: ISR registration failed",
2465 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2466 return -ENODEV;
2467 }
2468 vxge_debug_init(VXGE_TRACE,
2469 "new %s-%d line allocated",
2470 "IRQ", vdev->pdev->irq);
2471 }
2473 return VXGE_HW_OK;
2474 }
2476 static void vxge_poll_vp_reset(unsigned long data)
2477 {
2478 struct vxgedev *vdev = (struct vxgedev *)data;
2479 int i, j = 0;
2481 for (i = 0; i < vdev->no_of_vpath; i++) {
2482 if (test_bit(i, &vdev->vp_reset)) {
2483 vxge_reset_vpath(vdev, i);
2484 j++;
2485 }
2486 }
2487 if (j && (vdev->config.intr_type != MSI_X)) {
2488 vxge_hw_device_unmask_all(vdev->devh);
2489 vxge_hw_device_flush_io(vdev->devh);
2490 }
2492 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2493 }
2495 static void vxge_poll_vp_lockup(unsigned long data)
2496 {
2497 struct vxgedev *vdev = (struct vxgedev *)data;
2498 enum vxge_hw_status status = VXGE_HW_OK;
2499 struct vxge_vpath *vpath;
2500 struct vxge_ring *ring;
2501 int i;
2503 for (i = 0; i < vdev->no_of_vpath; i++) {
2504 ring = &vdev->vpaths[i].ring;
2504 ring = &vdev->vpaths[i].ring;
2505 /* Did this vpath receive any packets? */
2506 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
2507 status = vxge_hw_vpath_check_leak(ring->handle);
2509 /* Did it receive any packets the last time we checked? */
2510 if ((VXGE_HW_FAIL == status) &&
2511 (VXGE_HW_FAIL == ring->last_status)) {
2513 /* schedule vpath reset */
2514 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2515 vpath = &vdev->vpaths[i];
2517 /* disable interrupts for this vpath */
2518 vxge_vpath_intr_disable(vdev, i);
2520 /* stop the queue for this vpath */
2521 netif_tx_stop_queue(vpath->fifo.txq);
2522 continue;
2523 }
2524 }
2525 }
2526 ring->stats.prev_rx_frms = ring->stats.rx_frms;
2527 ring->last_status = status;
2528 }
2530 /* Check the rings once every millisecond */
2531 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2532 }
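/*
 * The lockup detector above requires two consecutive 1 ms samples with no
 * Rx progress (prev_rx_frms == rx_frms) and a leak check that failed both
 * this time and last time (VXGE_HW_FAIL twice) before it marks a vpath
 * for reset; the reset itself is then performed by vxge_poll_vp_reset().
 */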
2534 /**
2535 * vxge_open
2536 * @dev: pointer to the device structure.
2538 * This function is the open entry point of the driver. It mainly calls a
2539 * function to allocate Rx buffers and inserts them into the buffer
2540 * descriptors and then enables the Rx part of the NIC.
2541 * Return value: '0' on success and an appropriate (-)ve integer as
2542 * defined in errno.h file on failure.
2543 */
2544 static int
2545 vxge_open(struct net_device *dev)
2546 {
2547 enum vxge_hw_status status;
2548 struct vxgedev *vdev;
2549 struct __vxge_hw_device *hldev;
2550 struct vxge_vpath *vpath;
2551 int ret = 0;
2552 int i;
2553 u64 val64, function_mode;
2554 vxge_debug_entryexit(VXGE_TRACE,
2555 "%s: %s:%d", dev->name, __func__, __LINE__);
2557 vdev = (struct vxgedev *)netdev_priv(dev);
2558 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2559 function_mode = vdev->config.device_hw_info.function_mode;
2561 /* make sure you have link off by default every time
2562 * the NIC is enabled */
2563 netif_carrier_off(dev);
2565 /* Open VPATHs */
2566 status = vxge_open_vpaths(vdev);
2567 if (status != VXGE_HW_OK) {
2568 vxge_debug_init(VXGE_ERR,
2569 "%s: fatal: Vpath open failed", vdev->ndev->name);
2570 ret = -EPERM;
2571 goto out0;
2572 }
2574 vdev->mtu = dev->mtu;
2576 status = vxge_add_isr(vdev);
2577 if (status != VXGE_HW_OK) {
2578 vxge_debug_init(VXGE_ERR,
2579 "%s: fatal: ISR add failed", dev->name);
2580 goto out1;
2581 }
2584 if (vdev->config.intr_type != MSI_X) {
2585 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2586 vdev->config.napi_weight);
2587 napi_enable(&vdev->napi);
2588 for (i = 0; i < vdev->no_of_vpath; i++) {
2589 vpath = &vdev->vpaths[i];
2590 vpath->ring.napi_p = &vdev->napi;
2591 }
2592 } else {
2593 for (i = 0; i < vdev->no_of_vpath; i++) {
2594 vpath = &vdev->vpaths[i];
2595 netif_napi_add(dev, &vpath->ring.napi,
2596 vxge_poll_msix, vdev->config.napi_weight);
2597 napi_enable(&vpath->ring.napi);
2598 vpath->ring.napi_p = &vpath->ring.napi;
2599 }
2600 }
2602 /* configure RTH */
2603 if (vdev->config.rth_steering) {
2604 status = vxge_rth_configure(vdev);
2605 if (status != VXGE_HW_OK) {
2606 vxge_debug_init(VXGE_ERR,
2607 "%s: fatal: RTH configuration failed",
2608 dev->name);
2609 goto out2;
2610 }
2611 }
2613 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2614 hldev->config.rth_en ? "enabled" : "disabled");
2616 for (i = 0; i < vdev->no_of_vpath; i++) {
2617 vpath = &vdev->vpaths[i];
2619 /* set initial mtu before enabling the device */
2620 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2621 if (status != VXGE_HW_OK) {
2622 vxge_debug_init(VXGE_ERR,
2623 "%s: fatal: can not set new MTU", dev->name);
2624 goto out2;
2625 }
2626 }
2629 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2630 vxge_debug_init(vdev->level_trace,
2631 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2632 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2634 /* Restore the DA, VID table and also multicast and promiscuous mode
2635 * states
2636 */
2637 if (vdev->all_multi_flg) {
2638 for (i = 0; i < vdev->no_of_vpath; i++) {
2639 vpath = &vdev->vpaths[i];
2640 vxge_restore_vpath_mac_addr(vpath);
2641 vxge_restore_vpath_vid_table(vpath);
2643 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2644 if (status != VXGE_HW_OK)
2645 vxge_debug_init(VXGE_ERR,
2646 "%s:%d Enabling multicast failed",
2647 __func__, __LINE__);
2648 }
2649 }
2651 /* Enable vpath to sniff all unicast/multicast traffic that is not
2652 * addressed to it. We allow promiscuous mode for the PF only
2653 */
2655 val64 = 0;
2656 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2657 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2659 vxge_hw_mgmt_reg_write(vdev->devh,
2660 vxge_hw_mgmt_reg_type_mrpcim,
2661 0,
2662 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2663 rxmac_authorize_all_addr),
2664 val64);
2666 vxge_hw_mgmt_reg_write(vdev->devh,
2667 vxge_hw_mgmt_reg_type_mrpcim,
2668 0,
2669 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2670 rxmac_authorize_all_vid),
2671 val64);
2673 vxge_set_multicast(dev);
2675 /* Enable bcast and mcast for all vpaths */
2676 for (i = 0; i < vdev->no_of_vpath; i++) {
2677 vpath = &vdev->vpaths[i];
2678 status = vxge_hw_vpath_bcast_enable(vpath->handle);
2679 if (status != VXGE_HW_OK)
2680 vxge_debug_init(VXGE_ERR,
2681 "%s : Can not enable bcast for vpath "
2682 "id %d", dev->name, i);
2683 if (vdev->config.addr_learn_en) {
2684 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2685 if (status != VXGE_HW_OK)
2686 vxge_debug_init(VXGE_ERR,
2687 "%s : Can not enable mcast for vpath "
2688 "id %d", dev->name, i);
2689 }
2690 }
2692 vxge_hw_device_setpause_data(vdev->devh, 0,
2693 vdev->config.tx_pause_enable,
2694 vdev->config.rx_pause_enable);
2696 if (vdev->vp_reset_timer.function == NULL)
2697 vxge_os_timer(vdev->vp_reset_timer,
2698 vxge_poll_vp_reset, vdev, (HZ/2));
2700 if (vdev->vp_lockup_timer.function == NULL)
2701 vxge_os_timer(vdev->vp_lockup_timer,
2702 vxge_poll_vp_lockup, vdev, (HZ/2));
2704 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2708 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2709 netif_carrier_on(vdev->ndev);
2710 netdev_notice(vdev->ndev, "Link Up\n");
2711 vdev->stats.link_up++;
2712 }
2714 vxge_hw_device_intr_enable(vdev->devh);
2718 for (i = 0; i < vdev->no_of_vpath; i++) {
2719 vpath = &vdev->vpaths[i];
2721 vxge_hw_vpath_enable(vpath->handle);
2722 smp_wmb();
2723 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
2724 }
2726 netif_tx_start_all_queues(vdev->ndev);
2727 goto out0;
2729 out2:
2730 vxge_rem_isr(vdev);
2732 /* Disable napi */
2733 if (vdev->config.intr_type != MSI_X)
2734 napi_disable(&vdev->napi);
2735 else {
2736 for (i = 0; i < vdev->no_of_vpath; i++)
2737 napi_disable(&vdev->vpaths[i].ring.napi);
2738 }
2740 out1:
2741 vxge_close_vpaths(vdev, 0);
2742 out0:
2743 vxge_debug_entryexit(VXGE_TRACE,
2744 "%s: %s:%d Exiting...",
2745 dev->name, __func__, __LINE__);
2746 return ret;
2747 }
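/*
 * Bring-up order used by vxge_open() above: open the vpaths, register the
 * ISR(s), enable NAPI, configure RTH, set the per-vpath MTU, restore the
 * address/VID tables, configure pause frames and the service timers, and
 * only then enable interrupts and start the Tx queues. The out2/out1/out0
 * labels unwind these steps in reverse order on failure.
 */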
2749 /* Loop through the MAC address list and delete all the entries */
2750 static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2751 {
2753 struct list_head *entry, *next;
2754 if (list_empty(&vpath->mac_addr_list))
2755 return;
2757 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2758 list_del(entry);
2759 kfree((struct vxge_mac_addrs *)entry);
2760 }
2761 }
2763 static void vxge_napi_del_all(struct vxgedev *vdev)
2764 {
2765 int i;
2766 if (vdev->config.intr_type != MSI_X)
2767 netif_napi_del(&vdev->napi);
2768 else {
2769 for (i = 0; i < vdev->no_of_vpath; i++)
2770 netif_napi_del(&vdev->vpaths[i].ring.napi);
2771 }
2772 }
2774 static int do_vxge_close(struct net_device *dev, int do_io)
2775 {
2776 enum vxge_hw_status status;
2777 struct vxgedev *vdev;
2778 struct __vxge_hw_device *hldev;
2779 int i;
2780 u64 val64, vpath_vector;
2781 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2782 dev->name, __func__, __LINE__);
2784 vdev = (struct vxgedev *)netdev_priv(dev);
2785 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
2787 if (unlikely(!is_vxge_card_up(vdev)))
2788 return 0;
2790 /* If vxge_handle_crit_err task is executing,
2791 * wait till it completes. */
2792 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2793 msleep(50);
2795 if (do_io) {
2796 /* Put the vpath back in normal mode */
2797 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2798 status = vxge_hw_mgmt_reg_read(vdev->devh,
2799 vxge_hw_mgmt_reg_type_mrpcim,
2800 0,
2801 (ulong)offsetof(
2802 struct vxge_hw_mrpcim_reg,
2803 rts_mgr_cbasin_cfg),
2804 &val64);
2806 if (status == VXGE_HW_OK) {
2807 val64 &= ~vpath_vector;
2808 status = vxge_hw_mgmt_reg_write(vdev->devh,
2809 vxge_hw_mgmt_reg_type_mrpcim,
2810 0,
2811 (ulong)offsetof(
2812 struct vxge_hw_mrpcim_reg,
2813 rts_mgr_cbasin_cfg),
2814 val64);
2815 }
2817 /* Remove function 0 from promiscuous mode */
2818 vxge_hw_mgmt_reg_write(vdev->devh,
2819 vxge_hw_mgmt_reg_type_mrpcim,
2820 0,
2821 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2822 rxmac_authorize_all_addr),
2823 0);
2825 vxge_hw_mgmt_reg_write(vdev->devh,
2826 vxge_hw_mgmt_reg_type_mrpcim,
2827 0,
2828 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2829 rxmac_authorize_all_vid),
2830 0);
2831 }
2834 del_timer_sync(&vdev->vp_lockup_timer);
2836 del_timer_sync(&vdev->vp_reset_timer);
2839 vxge_hw_device_wait_receive_idle(hldev);
2841 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2844 if (vdev->config.intr_type != MSI_X)
2845 napi_disable(&vdev->napi);
2846 else {
2847 for (i = 0; i < vdev->no_of_vpath; i++)
2848 napi_disable(&vdev->vpaths[i].ring.napi);
2849 }
2851 netif_carrier_off(vdev->ndev);
2852 netdev_notice(vdev->ndev, "Link Down\n");
2853 netif_tx_stop_all_queues(vdev->ndev);
2855 /* Note that at this point xmit() is stopped by upper layer */
2857 vxge_hw_device_intr_disable(vdev->devh);
2859 vxge_rem_isr(vdev);
2861 vxge_napi_del_all(vdev);
2863 if (do_io)
2864 vxge_reset_all_vpaths(vdev);
2866 vxge_close_vpaths(vdev, 0);
2868 vxge_debug_entryexit(VXGE_TRACE,
2869 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2871 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2873 return 0;
2874 }
2876 /**
2877 * vxge_close
2878 * @dev: device pointer.
2880 * This is the stop entry point of the driver. It needs to undo exactly
2881 * whatever was done by the open entry point, thus it's usually referred to
2882 * as the close function. Among other things, this function mainly stops the
2883 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2884 * Return value: '0' on success and an appropriate (-)ve integer as
2885 * defined in errno.h file on failure.
2886 */
2887 static int
2888 vxge_close(struct net_device *dev)
2889 {
2890 do_vxge_close(dev, 1);
2891 return 0;
2892 }
2894 /**
2895 * vxge_change_mtu
2896 * @dev: net device pointer.
2897 * @new_mtu :the new MTU size for the device.
2899 * A driver entry point to change MTU size for the device. Before changing
2900 * the MTU the device must be stopped.
2901 */
2902 static int vxge_change_mtu(struct net_device *dev, int new_mtu)
2903 {
2904 struct vxgedev *vdev = netdev_priv(dev);
2906 vxge_debug_entryexit(vdev->level_trace,
2907 "%s:%d", __func__, __LINE__);
2908 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
2909 vxge_debug_init(vdev->level_err,
2910 "%s: mtu size is invalid", dev->name);
2911 return -EPERM;
2912 }
2914 /* check if device is down already */
2915 if (unlikely(!is_vxge_card_up(vdev))) {
2916 /* just store the new value; it will be used later, on open() */
2917 dev->mtu = new_mtu;
2918 vxge_debug_init(vdev->level_err,
2919 "%s", "device is down on MTU change");
2920 return 0;
2921 }
2923 vxge_debug_init(vdev->level_trace,
2924 "trying to apply new MTU %d", new_mtu);
2926 if (vxge_close(dev))
2927 return -EIO;
2929 dev->mtu = new_mtu;
2930 vdev->mtu = new_mtu;
2932 /* Open the device */
2933 if (vxge_open(dev))
2934 return -EIO;
2935 vxge_debug_init(vdev->level_trace,
2936 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
2938 vxge_debug_entryexit(vdev->level_trace,
2939 "%s:%d Exiting...", __func__, __LINE__);
2941 return 0;
2942 }
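/*
 * Usage sketch (hypothetical interface name): an MTU change issued from
 * user space, e.g. "ip link set eth0 mtu 9000", reaches this function via
 * the ndo_change_mtu hook below; on a running interface it amounts to a
 * full vxge_close()/vxge_open() cycle with the new MTU applied in between.
 */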
2944 /**
2945 * vxge_get_stats64
2946 * @dev: pointer to the device structure
2947 * @stats: pointer to struct rtnl_link_stats64
2949 */
2950 static struct rtnl_link_stats64 *
2951 vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2952 {
2953 struct vxgedev *vdev = netdev_priv(dev);
2954 int k;
2956 /* net_stats already zeroed by caller */
2957 for (k = 0; k < vdev->no_of_vpath; k++) {
2958 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
2959 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
2960 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
2961 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
2962 net_stats->rx_dropped +=
2963 vdev->vpaths[k].ring.stats.rx_dropped;
2965 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
2966 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
2967 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
2968 }
2970 return net_stats;
2971 }
2973 /**
2974 * vxge_ioctl
2975 * @dev: Device pointer.
2976 * @ifr: An IOCTL specific structure, that can contain a pointer to
2977 * a proprietary structure used to pass information to the driver.
2978 * @cmd: This is used to distinguish between the different commands that
2979 * can be passed to the IOCTL functions.
2981 * Entry point for the Ioctl.
2983 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2984 {
2985 return -EOPNOTSUPP;
2986 }
2988 /**
2989 * vxge_tx_watchdog
2990 * @dev: pointer to net device structure
2992 * Watchdog for transmit side.
2993 * This function is triggered if the Tx Queue is stopped
2994 * for a pre-defined amount of time when the Interface is still up.
2995 */
2996 static void
2997 vxge_tx_watchdog(struct net_device *dev)
2998 {
2999 struct vxgedev *vdev;
3001 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3003 vdev = (struct vxgedev *)netdev_priv(dev);
3005 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3007 vxge_reset(vdev);
3008 vxge_debug_entryexit(VXGE_TRACE,
3009 "%s:%d Exiting...", __func__, __LINE__);
3010 }
3012 /**
3013 * vxge_vlan_rx_register
3014 * @dev: net device pointer.
3017 * Vlan group registration
3018 */
3019 static void
3020 vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3021 {
3022 struct vxgedev *vdev;
3023 struct vxge_vpath *vpath;
3024 int vp;
3025 u64 vid;
3026 enum vxge_hw_status status;
3027 int i;
3029 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3031 vdev = (struct vxgedev *)netdev_priv(dev);
3032 vdev->vlgrp = grp;
3033 vpath = &vdev->vpaths[0];
3034 if ((NULL == grp) && (vpath->is_open)) {
3035 /* Get the first vlan */
3036 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3038 while (status == VXGE_HW_OK) {
3040 /* Delete this vlan from the vid table */
3041 for (vp = 0; vp < vdev->no_of_vpath; vp++) {
3042 vpath = &vdev->vpaths[vp];
3043 if (!vpath->is_open)
3044 continue;
3046 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3047 }
3049 /* Get the next vlan to be deleted */
3050 vpath = &vdev->vpaths[0];
3051 status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
3052 }
3053 }
3057 for (i = 0; i < vdev->no_of_vpath; i++) {
3058 if (vdev->vpaths[i].is_configured)
3059 vdev->vpaths[i].ring.vlgrp = grp;
3060 }
3062 vxge_debug_entryexit(VXGE_TRACE,
3063 "%s:%d Exiting...", __func__, __LINE__);
3064 }
3066 /**
3067 * vxge_vlan_rx_add_vid
3068 * @dev: net device pointer.
3069 * @vid: vlan id to be added.
3071 * Add the vlan id to the device's vlan id table
3072 */
3073 static void
3074 vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3075 {
3076 struct vxgedev *vdev;
3077 struct vxge_vpath *vpath;
3078 int vp_id;
3080 vdev = (struct vxgedev *)netdev_priv(dev);
3082 /* Add this vlan to the vid table */
3083 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3084 vpath = &vdev->vpaths[vp_id];
3085 if (!vpath->is_open)
3086 continue;
3087 vxge_hw_vpath_vid_add(vpath->handle, vid);
3088 }
3089 }
3091 /**
3092 * vxge_vlan_rx_kill_vid
3093 * @dev: net device pointer.
3094 * @vid: vlan id to be removed.
3096 * Remove the vlan id from the device's vlan id table
3097 */
3098 static void
3099 vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3100 {
3101 struct vxgedev *vdev;
3102 struct vxge_vpath *vpath;
3103 int vp_id;
3105 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3107 vdev = (struct vxgedev *)netdev_priv(dev);
3109 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3111 /* Delete this vlan from the vid table */
3112 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3113 vpath = &vdev->vpaths[vp_id];
3114 if (!vpath->is_open)
3115 continue;
3116 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3117 }
3118 vxge_debug_entryexit(VXGE_TRACE,
3119 "%s:%d Exiting...", __func__, __LINE__);
3120 }
3122 static const struct net_device_ops vxge_netdev_ops = {
3123 .ndo_open = vxge_open,
3124 .ndo_stop = vxge_close,
3125 .ndo_get_stats64 = vxge_get_stats64,
3126 .ndo_start_xmit = vxge_xmit,
3127 .ndo_validate_addr = eth_validate_addr,
3128 .ndo_set_multicast_list = vxge_set_multicast,
3130 .ndo_do_ioctl = vxge_ioctl,
3132 .ndo_set_mac_address = vxge_set_mac_addr,
3133 .ndo_change_mtu = vxge_change_mtu,
3134 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3135 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3136 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3138 .ndo_tx_timeout = vxge_tx_watchdog,
3139 #ifdef CONFIG_NET_POLL_CONTROLLER
3140 .ndo_poll_controller = vxge_netpoll,
3141 #endif
3142 };
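/*
 * The net_device_ops table above is the dispatch point from the network
 * stack into this driver: dev_open() lands in vxge_open() via ndo_open,
 * transmits enter through ndo_start_xmit (vxge_xmit), and MTU changes are
 * routed to vxge_change_mtu() via ndo_change_mtu.
 */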
3144 static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3145 struct vxge_config *config,
3146 int high_dma, int no_of_vpath,
3147 struct vxgedev **vdev_out)
3148 {
3149 struct net_device *ndev;
3150 enum vxge_hw_status status = VXGE_HW_OK;
3151 struct vxgedev *vdev;
3152 int ret = 0, no_of_queue = 1;
3156 if (config->tx_steering_type)
3157 no_of_queue = no_of_vpath;
3159 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3160 no_of_queue);
3161 if (ndev == NULL) {
3162 vxge_debug_init(
3163 vxge_hw_device_trace_level_get(hldev),
3164 "%s : device allocation failed", __func__);
3165 ret = -ENODEV;
3166 goto _out0;
3167 }
3169 vxge_debug_entryexit(
3170 vxge_hw_device_trace_level_get(hldev),
3171 "%s: %s:%d Entering...",
3172 ndev->name, __func__, __LINE__);
3174 vdev = netdev_priv(ndev);
3175 memset(vdev, 0, sizeof(struct vxgedev));
3177 vdev->ndev = ndev;
3178 vdev->devh = hldev;
3179 vdev->pdev = hldev->pdev;
3180 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3181 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3183 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3185 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
3186 NETIF_F_HW_VLAN_FILTER;
3187 /* Driver entry points */
3188 ndev->irq = vdev->pdev->irq;
3189 ndev->base_addr = (unsigned long) hldev->bar0;
3191 ndev->netdev_ops = &vxge_netdev_ops;
3193 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3195 vxge_initialize_ethtool_ops(ndev);
3197 if (vdev->config.rth_steering != NO_STEERING) {
3198 ndev->features |= NETIF_F_RXHASH;
3199 hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
3202 /* Allocate memory for vpath */
3203 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3204 no_of_vpath, GFP_KERNEL);
3205 if (!vdev->vpaths) {
3206 vxge_debug_init(VXGE_ERR,
3207 "%s: vpath memory allocation failed",
3208 vdev->ndev->name);
3209 ret = -ENODEV;
3210 goto _out1;
3211 }
3213 ndev->features |= NETIF_F_SG;
3215 ndev->features |= NETIF_F_HW_CSUM;
3216 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3217 "%s : checksumming enabled", __func__);
3219 if (high_dma) {
3220 ndev->features |= NETIF_F_HIGHDMA;
3221 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3222 "%s : using High DMA", __func__);
3223 }
3225 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
3227 if (vdev->config.gro_enable)
3228 ndev->features |= NETIF_F_GRO;
3230 if (register_netdev(ndev)) {
3231 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3232 "%s: %s : device registration failed!",
3233 ndev->name, __func__);
3234 ret = -ENODEV;
3235 goto _out2;
3236 }
3238 /* Set the factory defined MAC address initially */
3239 ndev->addr_len = ETH_ALEN;
3241 /* Make Link state as off at this point, when the Link change
3242 * interrupt comes the state will be automatically changed to
3245 netif_carrier_off(ndev);
3247 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3248 "%s: Ethernet device registered",
3249 ndev->name);
3251 *vdev_out = vdev;
3253 /* Resetting the Device stats */
3254 status = vxge_hw_mrpcim_stats_access(
3256 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3261 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
3262 vxge_debug_init(
3263 vxge_hw_device_trace_level_get(hldev),
3264 "%s: device stats clear returns "
3265 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
3267 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3268 "%s: %s:%d Exiting...",
3269 ndev->name, __func__, __LINE__);
3271 return ret;
3272 _out2:
3273 kfree(vdev->vpaths);
3274 _out1:
3275 free_netdev(ndev);
3276 _out0:
3277 return ret;
3278 }
3280 /*
3281 * vxge_device_unregister
3283 * This function will unregister and free network device
3285 static void
3286 vxge_device_unregister(struct __vxge_hw_device *hldev)
3287 {
3288 struct vxgedev *vdev;
3289 struct net_device *dev;
3290 char buf[IFNAMSIZ];
3291 #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3292 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3293 u32 level_trace;
3294 #endif
3296 dev = hldev->ndev;
3297 vdev = netdev_priv(dev);
3298 #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3299 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3300 level_trace = vdev->level_trace;
3301 #endif
3302 vxge_debug_entryexit(level_trace,
3303 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3305 memcpy(buf, vdev->ndev->name, IFNAMSIZ);
3307 /* in 2.6 will call stop() if device is up */
3308 unregister_netdev(dev);
3310 flush_scheduled_work();
3312 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
3313 vxge_debug_entryexit(level_trace,
3314 "%s: %s:%d Exiting...", buf, __func__, __LINE__);
3315 }
3317 /*
3318 * vxge_callback_crit_err
3320 * This function is called by the alarm handler in interrupt context.
3321 * Driver must analyze it based on the event type.
3322 */
3323 static void
3324 vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3325 enum vxge_hw_event type, u64 vp_id)
3326 {
3327 struct net_device *dev = hldev->ndev;
3328 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
3329 struct vxge_vpath *vpath = NULL;
3330 int vpath_idx;
3332 vxge_debug_entryexit(vdev->level_trace,
3333 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3335 /* Note: This event type should be used for device wide
3336 * indications only - Serious errors, Slot freeze and critical errors
3337 */
3338 vdev->cric_err_event = type;
3340 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3341 vpath = &vdev->vpaths[vpath_idx];
3342 if (vpath->device_id == vp_id)
3343 break;
3344 }
3346 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3347 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3348 vxge_debug_init(VXGE_ERR,
3349 "%s: Slot is frozen", vdev->ndev->name);
3350 } else if (type == VXGE_HW_EVENT_SERR) {
3351 vxge_debug_init(VXGE_ERR,
3352 "%s: Encountered Serious Error",
3354 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3355 vxge_debug_init(VXGE_ERR,
3356 "%s: Encountered Critical Error",
3357 vdev->ndev->name);
3358 }
3360 if ((type == VXGE_HW_EVENT_SERR) ||
3361 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3362 if (unlikely(vdev->exec_mode))
3363 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3364 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3365 vxge_hw_device_mask_all(hldev);
3366 if (unlikely(vdev->exec_mode))
3367 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3368 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3369 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3371 if (unlikely(vdev->exec_mode))
3372 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3374 /* check if this vpath is already set for reset */
3375 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3377 /* disable interrupts for this vpath */
3378 vxge_vpath_intr_disable(vdev, vpath_idx);
3380 /* stop the queue for this vpath */
3381 netif_tx_stop_queue(vpath->fifo.txq);
3382 }
3383 }
3386 vxge_debug_entryexit(vdev->level_trace,
3387 "%s: %s:%d Exiting...",
3388 vdev->ndev->name, __func__, __LINE__);
3389 }
3391 static void verify_bandwidth(void)
3392 {
3393 int i, band_width, total = 0, equal_priority = 0;
3395 /* 1. If user enters 0 for some fifo, give equal priority to all */
3396 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3397 if (bw_percentage[i] == 0) {
3398 equal_priority = 1;
3399 break;
3400 }
3401 }
3403 if (!equal_priority) {
3404 /* 2. If sum exceeds 100, give equal priority to all */
3405 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3406 if (bw_percentage[i] == 0xFF)
3407 break;
3409 total += bw_percentage[i];
3410 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3411 equal_priority = 1;
3412 break;
3413 }
3414 }
3415 }
3417 if (!equal_priority) {
3418 /* Is all the bandwidth consumed? */
3419 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3420 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3421 /* Split rest of bw equally among next VPs */
3422 band_width =
3423 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3424 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3425 if (band_width < 2) /* min of 2% */
3426 equal_priority = 1;
3427 else {
3428 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3429 i++)
3430 bw_percentage[i] =
3431 band_width;
3432 }
3433 }
3434 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3435 equal_priority = 1;
3436 }
3438 if (equal_priority) {
3439 vxge_debug_init(VXGE_ERR,
3440 "%s: Assigning equal bandwidth to all the vpaths",
3442 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3443 VXGE_HW_MAX_VIRTUAL_PATHS;
3444 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3445 bw_percentage[i] = bw_percentage[0];
3446 }
3447 }
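/*
 * Worked example, assuming VXGE_HW_VPATH_BANDWIDTH_MAX is 100 (percent):
 * loading with bw_percentage=30,30,20 gives total = 80, leaving 20% for
 * the 14 unset vpaths; 20 / 14 rounds down below the 2% minimum, so the
 * function falls back to an equal split of 100 / 17 = 5% per vpath.
 */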
3449 /*
3450 * Vpath configuration
3451 */
3452 static int __devinit vxge_config_vpaths(
3453 struct vxge_hw_device_config *device_config,
3454 u64 vpath_mask, struct vxge_config *config_param)
3456 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3457 u32 txdl_size, txdl_per_memblock;
3459 temp = driver_config->vpath_per_dev;
3460 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3461 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3462 /* No more CPUs. Return the vpath count as zero. */
3463 if (driver_config->g_no_cpus == -1)
3464 return 0;
3466 if (!driver_config->g_no_cpus)
3467 driver_config->g_no_cpus = num_online_cpus();
3469 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3470 if (!driver_config->vpath_per_dev)
3471 driver_config->vpath_per_dev = 1;
3473 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3474 if (!vxge_bVALn(vpath_mask, i, 1))
3475 continue;
3476 else
3477 default_no_vpath++;
3478 if (default_no_vpath < driver_config->vpath_per_dev)
3479 driver_config->vpath_per_dev = default_no_vpath;
3481 driver_config->g_no_cpus = driver_config->g_no_cpus -
3482 (driver_config->vpath_per_dev * 2);
3483 if (driver_config->g_no_cpus <= 0)
3484 driver_config->g_no_cpus = -1;
3485 }
3487 if (driver_config->vpath_per_dev == 1) {
3488 vxge_debug_ll_config(VXGE_TRACE,
3489 "%s: Disable tx and rx steering, "
3490 "as single vpath is configured", VXGE_DRIVER_NAME);
3491 config_param->rth_steering = NO_STEERING;
3492 config_param->tx_steering_type = NO_STEERING;
3493 device_config->rth_en = 0;
3494 }
3496 /* configure bandwidth */
3497 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3498 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3500 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3501 device_config->vp_config[i].vp_id = i;
3502 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3503 if (no_of_vpaths < driver_config->vpath_per_dev) {
3504 if (!vxge_bVALn(vpath_mask, i, 1)) {
3505 vxge_debug_ll_config(VXGE_TRACE,
3506 "%s: vpath: %d is not available",
3507 VXGE_DRIVER_NAME, i);
3508 continue;
3509 } else {
3510 vxge_debug_ll_config(VXGE_TRACE,
3511 "%s: vpath: %d available",
3512 VXGE_DRIVER_NAME, i);
3513 no_of_vpaths++;
3514 }
3515 } else {
3516 vxge_debug_ll_config(VXGE_TRACE,
3517 "%s: vpath: %d is not configured, "
3518 "max_config_vpath exceeded",
3519 VXGE_DRIVER_NAME, i);
3520 break;
3521 }
3523 /* Configure Tx fifo's */
3524 device_config->vp_config[i].fifo.enable =
3525 VXGE_HW_FIFO_ENABLE;
3526 device_config->vp_config[i].fifo.max_frags =
3527 MAX_SKB_FRAGS;
3528 device_config->vp_config[i].fifo.memblock_size =
3529 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3531 txdl_size = device_config->vp_config[i].fifo.max_frags *
3532 sizeof(struct vxge_hw_fifo_txd);
3533 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3535 device_config->vp_config[i].fifo.fifo_blocks =
3536 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3538 device_config->vp_config[i].fifo.intr =
3539 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3541 /* Configure tti properties */
3542 device_config->vp_config[i].tti.intr_enable =
3543 VXGE_HW_TIM_INTR_ENABLE;
3545 device_config->vp_config[i].tti.btimer_val =
3546 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3548 device_config->vp_config[i].tti.timer_ac_en =
3549 VXGE_HW_TIM_TIMER_AC_ENABLE;
3551 /* For msi-x with napi (each vector
3552 has a handler of its own) -
3553 Set CI to OFF for all vpaths */
3554 device_config->vp_config[i].tti.timer_ci_en =
3555 VXGE_HW_TIM_TIMER_CI_DISABLE;
3557 device_config->vp_config[i].tti.timer_ri_en =
3558 VXGE_HW_TIM_TIMER_RI_DISABLE;
3560 device_config->vp_config[i].tti.util_sel =
3561 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3563 device_config->vp_config[i].tti.ltimer_val =
3564 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3566 device_config->vp_config[i].tti.rtimer_val =
3567 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3569 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3570 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3571 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3572 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3573 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3574 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3575 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3577 /* Configure Rx rings */
3578 device_config->vp_config[i].ring.enable =
3579 VXGE_HW_RING_ENABLE;
3581 device_config->vp_config[i].ring.ring_blocks =
3582 VXGE_HW_DEF_RING_BLOCKS;
3583 device_config->vp_config[i].ring.buffer_mode =
3584 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3585 device_config->vp_config[i].ring.rxds_limit =
3586 VXGE_HW_DEF_RING_RXDS_LIMIT;
3587 device_config->vp_config[i].ring.scatter_mode =
3588 VXGE_HW_RING_SCATTER_MODE_A;
3590 /* Configure rti properties */
3591 device_config->vp_config[i].rti.intr_enable =
3592 VXGE_HW_TIM_INTR_ENABLE;
3594 device_config->vp_config[i].rti.btimer_val =
3595 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3597 device_config->vp_config[i].rti.timer_ac_en =
3598 VXGE_HW_TIM_TIMER_AC_ENABLE;
3600 device_config->vp_config[i].rti.timer_ci_en =
3601 VXGE_HW_TIM_TIMER_CI_DISABLE;
3603 device_config->vp_config[i].rti.timer_ri_en =
3604 VXGE_HW_TIM_TIMER_RI_DISABLE;
3606 device_config->vp_config[i].rti.util_sel =
3607 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3609 device_config->vp_config[i].rti.urange_a =
3610 RTI_RX_URANGE_A;
3611 device_config->vp_config[i].rti.urange_b =
3612 RTI_RX_URANGE_B;
3613 device_config->vp_config[i].rti.urange_c =
3614 RTI_RX_URANGE_C;
3615 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3616 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3617 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3618 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3620 device_config->vp_config[i].rti.rtimer_val =
3621 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3623 device_config->vp_config[i].rti.ltimer_val =
3624 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
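/*
 * Assumption, inferred from the code rather than documented here: the
 * "(VXGE_*_VAL * 1000) / 272" expressions above appear to convert the
 * configured timer values from microseconds into device timer ticks of
 * roughly 272 ns each.
 */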
3626 device_config->vp_config[i].rpa_strip_vlan_tag =
3627 vlan_tag_strip;
3628 }
3630 driver_config->vpath_per_dev = temp;
3631 return no_of_vpaths;
3632 }
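/*
 * Example of the default sizing above: with max_config_vpath left at
 * VXGE_USE_DEFAULT and, say, 8 online CPUs, vpath_per_dev starts out as
 * 8 >> 1 = 4 and is then clamped to the number of vpaths present in
 * vpath_mask; g_no_cpus is decremented so that subsequent functions of
 * the same adapter size themselves against the remaining CPUs.
 */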
3634 /* initialize device configurations */
3635 static void __devinit vxge_device_config_init(
3636 struct vxge_hw_device_config *device_config,
3637 int *intr_type)
3638 {
3639 /* Used for CQRQ/SRQ. */
3640 device_config->dma_blockpool_initial =
3641 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3643 device_config->dma_blockpool_max =
3644 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3646 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3647 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3649 #ifndef CONFIG_PCI_MSI
3650 vxge_debug_init(VXGE_ERR,
3651 "%s: This Kernel does not support "
3652 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3653 *intr_type = INTA;
3654 #endif
3656 /* Configure whether MSI-X or IRQL. */
3657 switch (*intr_type) {
3658 case INTA:
3659 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3660 break;
3662 case MSI_X:
3663 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
3664 break;
3665 }
3666 /* Timer period between device poll */
3667 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3669 /* Configure mac based steering. */
3670 device_config->rts_mac_en = addr_learn_en;
3672 /* Configure Vpaths */
3673 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3675 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3677 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3678 device_config->dma_blockpool_initial);
3679 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3680 device_config->dma_blockpool_max);
3681 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3682 device_config->intr_mode);
3683 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3684 device_config->device_poll_millis);
3685 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3686 device_config->rts_mac_en);
3687 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3688 device_config->rth_en);
3689 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3690 device_config->rth_it_type);
3693 static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3694 {
3695 int i;
3697 vxge_debug_init(VXGE_TRACE,
3698 "%s: %d Vpath(s) opened",
3699 vdev->ndev->name, vdev->no_of_vpath);
3701 switch (vdev->config.intr_type) {
3702 case INTA:
3703 vxge_debug_init(VXGE_TRACE,
3704 "%s: Interrupt type INTA", vdev->ndev->name);
3705 break;
3707 case MSI_X:
3708 vxge_debug_init(VXGE_TRACE,
3709 "%s: Interrupt type MSI-X", vdev->ndev->name);
3710 break;
3711 }
3713 if (vdev->config.rth_steering) {
3714 vxge_debug_init(VXGE_TRACE,
3715 "%s: RTH steering enabled for TCP_IPV4",
3716 vdev->ndev->name);
3717 } else {
3718 vxge_debug_init(VXGE_TRACE,
3719 "%s: RTH steering disabled", vdev->ndev->name);
3720 }
3722 switch (vdev->config.tx_steering_type) {
3723 case NO_STEERING:
3724 vxge_debug_init(VXGE_TRACE,
3725 "%s: Tx steering disabled", vdev->ndev->name);
3726 break;
3727 case TX_PRIORITY_STEERING:
3728 vxge_debug_init(VXGE_TRACE,
3729 "%s: Unsupported tx steering option",
3730 vdev->ndev->name);
3731 vxge_debug_init(VXGE_TRACE,
3732 "%s: Tx steering disabled", vdev->ndev->name);
3733 vdev->config.tx_steering_type = 0;
3734 break;
3735 case TX_VLAN_STEERING:
3736 vxge_debug_init(VXGE_TRACE,
3737 "%s: Unsupported tx steering option",
3738 vdev->ndev->name);
3739 vxge_debug_init(VXGE_TRACE,
3740 "%s: Tx steering disabled", vdev->ndev->name);
3741 vdev->config.tx_steering_type = 0;
3742 break;
3743 case TX_MULTIQ_STEERING:
3744 vxge_debug_init(VXGE_TRACE,
3745 "%s: Tx multiqueue steering enabled",
3746 vdev->ndev->name);
3747 break;
3748 case TX_PORT_STEERING:
3749 vxge_debug_init(VXGE_TRACE,
3750 "%s: Tx port steering enabled",
3751 vdev->ndev->name);
3752 break;
3753 default:
3754 vxge_debug_init(VXGE_ERR,
3755 "%s: Unsupported tx steering type",
3756 vdev->ndev->name);
3757 vxge_debug_init(VXGE_TRACE,
3758 "%s: Tx steering disabled", vdev->ndev->name);
3759 vdev->config.tx_steering_type = 0;
3760 }
3762 if (vdev->config.gro_enable) {
3763 vxge_debug_init(VXGE_ERR,
3764 "%s: Generic receive offload enabled",
3765 vdev->ndev->name);
3766 } else
3767 vxge_debug_init(VXGE_TRACE,
3768 "%s: Generic receive offload disabled",
3769 vdev->ndev->name);
3771 if (vdev->config.addr_learn_en)
3772 vxge_debug_init(VXGE_TRACE,
3773 "%s: MAC Address learning enabled", vdev->ndev->name);
3775 vxge_debug_init(VXGE_TRACE,
3776 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3778 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3779 if (!vxge_bVALn(vpath_mask, i, 1))
3780 continue;
3781 vxge_debug_ll_config(VXGE_TRACE,
3782 "%s: MTU size - %d", vdev->ndev->name,
3783 ((struct __vxge_hw_device *)(vdev->devh))->
3784 config.vp_config[i].mtu);
3785 vxge_debug_init(VXGE_TRACE,
3786 "%s: VLAN tag stripping %s", vdev->ndev->name,
3787 ((struct __vxge_hw_device *)(vdev->devh))->
3788 config.vp_config[i].rpa_strip_vlan_tag
3789 ? "Enabled" : "Disabled");
3790 vxge_debug_init(VXGE_TRACE,
3791 "%s: Ring blocks : %d", vdev->ndev->name,
3792 ((struct __vxge_hw_device *)(vdev->devh))->
3793 config.vp_config[i].ring.ring_blocks);
3794 vxge_debug_init(VXGE_TRACE,
3795 "%s: Fifo blocks : %d", vdev->ndev->name,
3796 ((struct __vxge_hw_device *)(vdev->devh))->
3797 config.vp_config[i].fifo.fifo_blocks);
3798 vxge_debug_ll_config(VXGE_TRACE,
3799 "%s: Max frags : %d", vdev->ndev->name,
3800 ((struct __vxge_hw_device *)(vdev->devh))->
3801 config.vp_config[i].fifo.max_frags);
3802 }
3803 }
3807 /**
3808 * vxge_pm_suspend - vxge power management suspend entry point
3809 *
3810 */
3811 static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
3812 {
3813 return -ENOSYS;
3814 }
3815 /**
3816 * vxge_pm_resume - vxge power management resume entry point
3817 *
3818 */
3819 static int vxge_pm_resume(struct pci_dev *pdev)
3820 {
3821 return -ENOSYS;
3822 }
3826 /**
3827 * vxge_io_error_detected - called when PCI error is detected
3828 * @pdev: Pointer to PCI device
3829 * @state: The current pci connection state
3831 * This function is called after a PCI bus error affecting
3832 * this device has been detected.
3834 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3835 pci_channel_state_t state)
3837 struct __vxge_hw_device *hldev =
3838 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3839 struct net_device *netdev = hldev->ndev;
3841 netif_device_detach(netdev);
3843 if (state == pci_channel_io_perm_failure)
3844 return PCI_ERS_RESULT_DISCONNECT;
3846 if (netif_running(netdev)) {
3847 /* Bring down the card, while avoiding PCI I/O */
3848 do_vxge_close(netdev, 0);
3849 }
3851 pci_disable_device(pdev);
3853 return PCI_ERS_RESULT_NEED_RESET;
3857 * vxge_io_slot_reset - called after the pci bus has been reset.
3858 * @pdev: Pointer to PCI device
3860 * Restart the card from scratch, as if from a cold-boot.
3861 * At this point, the card has experienced a hard reset,
3862 * followed by fixups by BIOS, and has its config space
3863 * set up identically to what it was at cold boot.
3865 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3867 struct __vxge_hw_device *hldev =
3868 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3869 struct net_device *netdev = hldev->ndev;
3871 struct vxgedev *vdev = netdev_priv(netdev);
3873 if (pci_enable_device(pdev)) {
3874 netdev_err(netdev, "Cannot re-enable device after reset\n");
3875 return PCI_ERS_RESULT_DISCONNECT;
3876 }
3878 pci_set_master(pdev);
3879 vxge_reset(vdev);
3881 return PCI_ERS_RESULT_RECOVERED;
3882 }
3885 * vxge_io_resume - called when traffic can start flowing again.
3886 * @pdev: Pointer to PCI device
3888 * This callback is called when the error recovery driver tells
3889 * us that its OK to resume normal operation.
3891 static void vxge_io_resume(struct pci_dev *pdev)
3893 struct __vxge_hw_device *hldev =
3894 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3895 struct net_device *netdev = hldev->ndev;
3897 if (netif_running(netdev)) {
3898 if (vxge_open(netdev)) {
3899 netdev_err(netdev,
3900 "Can't bring device back up after reset\n");
3901 return;
3902 }
3903 }
3905 netif_device_attach(netdev);
3906 }
3908 static inline u32 vxge_get_num_vfs(u64 function_mode)
3909 {
3910 u32 num_functions = 0;
3912 switch (function_mode) {
3913 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
3914 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
3915 num_functions = 8;
3916 break;
3917 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
3918 num_functions = 1;
3919 break;
3920 case VXGE_HW_FUNCTION_MODE_SRIOV:
3921 case VXGE_HW_FUNCTION_MODE_MRIOV:
3922 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
3923 num_functions = 17;
3924 break;
3925 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
3926 num_functions = 4;
3927 break;
3928 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
3929 num_functions = 2;
3930 break;
3931 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
3932 num_functions = 8; /* TODO */
3933 break;
3934 }
3935 return num_functions;
3936 }
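/*
 * The table above returns the total number of functions for each mode;
 * the caller in vxge_probe() uses vxge_get_num_vfs(...) - 1 as the VF
 * count, since one of those functions is the PF itself.
 */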
3938 /**
3939 * vxge_probe
3940 * @pdev : structure containing the PCI related information of the device.
3941 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
3942 * Description:
3943 * This function is called when a new PCI device gets detected and initializes
3944 * the device.
3945 * Return value:
3946 * returns 0 on success and negative on failure.
3947 */
3949 static int __devinit
3950 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3951 {
3952 struct __vxge_hw_device *hldev;
3953 enum vxge_hw_status status;
3954 int ret;
3955 int high_dma = 0;
3956 u64 vpath_mask = 0;
3957 struct vxgedev *vdev;
3958 struct vxge_config *ll_config = NULL;
3959 struct vxge_hw_device_config *device_config = NULL;
3960 struct vxge_hw_device_attr attr;
3961 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
3962 u8 *macaddr;
3963 struct vxge_mac_addrs *entry;
3964 static int bus = -1, device = -1;
3965 u32 host_type;
3966 u8 new_device = 0;
3967 enum vxge_hw_status is_privileged;
3968 u32 function_mode;
3969 u32 num_vfs = 0;
3971 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3974 /* In SRIOV-17 mode, functions of the same adapter
3975 * can be deployed on different buses */
3976 if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
3977 (device != PCI_SLOT(pdev->devfn))))
3978 new_device = 1;
3980 bus = pdev->bus->number;
3981 device = PCI_SLOT(pdev->devfn);
3983 if (new_device) {
3984 if (driver_config->config_dev_cnt &&
3985 (driver_config->config_dev_cnt !=
3986 driver_config->total_dev_cnt))
3987 vxge_debug_init(VXGE_ERR,
3988 "%s: Configured %d of %d devices",
3990 driver_config->config_dev_cnt,
3991 driver_config->total_dev_cnt);
3992 driver_config->config_dev_cnt = 0;
3993 driver_config->total_dev_cnt = 0;
3994 }
3995 /* Now making the CPU based no of vpath calculation
3996 * applicable for individual functions as well.
3998 driver_config->g_no_cpus = 0;
3999 driver_config->vpath_per_dev = max_config_vpath;
4001 driver_config->total_dev_cnt++;
4002 if (++driver_config->config_dev_cnt > max_config_dev) {
4003 ret = 0;
4004 goto _exit0;
4005 }
4007 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4009 if (!device_config) {
4010 ret = -ENOMEM;
4011 vxge_debug_init(VXGE_ERR,
4012 "device_config : malloc failed %s %d",
4013 __FILE__, __LINE__);
4014 goto _exit0;
4015 }
4017 ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
4018 if (!ll_config) {
4019 ret = -ENOMEM;
4020 vxge_debug_init(VXGE_ERR,
4021 "ll_config : malloc failed %s %d",
4022 __FILE__, __LINE__);
4023 goto _exit0;
4024 }
4025 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4026 ll_config->intr_type = MSI_X;
4027 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4028 ll_config->rth_steering = RTH_STEERING;
4030 /* get the default configuration parameters */
4031 vxge_hw_device_config_default_get(device_config);
4033 /* initialize configuration parameters */
4034 vxge_device_config_init(device_config, &ll_config->intr_type);
4036 ret = pci_enable_device(pdev);
4037 if (ret) {
4038 vxge_debug_init(VXGE_ERR,
4039 "%s : can not enable PCI device", __func__);
4040 goto _exit0;
4041 }
4043 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4044 vxge_debug_ll_config(VXGE_TRACE,
4045 "%s : using 64bit DMA", __func__);
4047 high_dma = 1;
4049 if (pci_set_consistent_dma_mask(pdev,
4050 DMA_BIT_MASK(64))) {
4051 vxge_debug_init(VXGE_ERR,
4052 "%s : unable to obtain 64bit DMA for "
4053 "consistent allocations", __func__);
4054 ret = -ENOMEM;
4055 goto _exit1;
4056 }
4057 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4058 vxge_debug_ll_config(VXGE_TRACE,
4059 "%s : using 32bit DMA", __func__);
4060 } else {
4061 ret = -ENOMEM;
4062 goto _exit1;
4063 }
4065 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
4066 vxge_debug_init(VXGE_ERR,
4067 "%s : request regions failed", __func__);
4068 ret = -ENODEV;
4069 goto _exit1;
4070 }
4072 pci_set_master(pdev);
4074 attr.bar0 = pci_ioremap_bar(pdev, 0);
4075 if (!attr.bar0) {
4076 vxge_debug_init(VXGE_ERR,
4077 "%s : cannot remap io memory bar0", __func__);
4078 ret = -ENODEV;
4079 goto _exit2;
4080 }
4081 vxge_debug_ll_config(VXGE_TRACE,
4082 "pci ioremap bar0: %p:0x%llx",
4083 attr.bar0,
4084 (unsigned long long)pci_resource_start(pdev, 0));
4086 status = vxge_hw_device_hw_info_get(attr.bar0,
4087 &ll_config->device_hw_info);
4088 if (status != VXGE_HW_OK) {
4089 vxge_debug_init(VXGE_ERR,
4090 "%s: Reading of hardware info failed. "
4091 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4092 ret = -EINVAL;
4093 goto _exit3;
4094 }
4096 if (ll_config->device_hw_info.fw_version.major !=
4097 VXGE_DRIVER_FW_VERSION_MAJOR) {
4098 vxge_debug_init(VXGE_ERR,
4099 "%s: Incorrect firmware version. "
4100 "Please upgrade the firmware to version 1.x.x",
4101 VXGE_DRIVER_NAME);
4102 ret = -EINVAL;
4103 goto _exit3;
4104 }
4106 vpath_mask = ll_config->device_hw_info.vpath_mask;
4107 if (vpath_mask == 0) {
4108 vxge_debug_ll_config(VXGE_TRACE,
4109 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4110 ret = -EINVAL;
4111 goto _exit3;
4112 }
4114 vxge_debug_ll_config(VXGE_TRACE,
4115 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4116 (unsigned long long)vpath_mask);
4118 function_mode = ll_config->device_hw_info.function_mode;
4119 host_type = ll_config->device_hw_info.host_type;
4120 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4121 ll_config->device_hw_info.func_id);
4123 /* Check how many vpaths are available */
4124 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4125 if (!((vpath_mask) & vxge_mBIT(i)))
4126 continue;
4127 max_vpath_supported++;
4128 }
4130 if (new_device)
4131 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4133 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4134 if (is_sriov(function_mode) && (max_config_dev > 1) &&
4135 (ll_config->intr_type != INTA) &&
4136 (is_privileged == VXGE_HW_OK)) {
4137 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4138 ? (max_config_dev - 1) : num_vfs);
4140 vxge_debug_ll_config(VXGE_ERR,
4141 "Failed in enabling SRIOV mode: %d\n", ret);
4144 /*
4145 * Configure vpaths and get driver configured number of vpaths
4146 * which is less than or equal to the maximum vpaths per function.
4148 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4149 if (!no_of_vpath) {
4150 vxge_debug_ll_config(VXGE_ERR,
4151 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4152 ret = -EINVAL;
4153 goto _exit3;
4154 }
4156 /* Setting driver callbacks */
4157 attr.uld_callbacks.link_up = vxge_callback_link_up;
4158 attr.uld_callbacks.link_down = vxge_callback_link_down;
4159 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4161 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4162 if (status != VXGE_HW_OK) {
4163 vxge_debug_init(VXGE_ERR,
4164 "Failed to initialize device (%d)", status);
4165 ret = -EINVAL;
4166 goto _exit3;
4167 }
4169 /* if FCS stripping is not disabled in MAC fail driver load */
4170 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
4171 vxge_debug_init(VXGE_ERR,
4172 "%s: FCS stripping is not disabled in MAC"
4173 " failing driver load", VXGE_DRIVER_NAME);
4174 ret = -EINVAL;
4175 goto _exit4;
4176 }
4178 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4180 /* set private device info */
4181 pci_set_drvdata(pdev, hldev);
4183 ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4184 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4185 ll_config->addr_learn_en = addr_learn_en;
4186 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4187 ll_config->rth_hash_type_tcpipv4 = 1;
4188 ll_config->rth_hash_type_ipv4 = 0;
4189 ll_config->rth_hash_type_tcpipv6 = 0;
4190 ll_config->rth_hash_type_ipv6 = 0;
4191 ll_config->rth_hash_type_tcpipv6ex = 0;
4192 ll_config->rth_hash_type_ipv6ex = 0;
4193 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4194 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4195 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4197 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4198 &vdev)) {
4199 ret = -EINVAL;
4200 goto _exit4;
4201 }
4203 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4204 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4205 vxge_hw_device_trace_level_get(hldev));
4207 /* set private HW device info */
4208 hldev->ndev = vdev->ndev;
4209 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4210 vdev->bar0 = attr.bar0;
4211 vdev->max_vpath_supported = max_vpath_supported;
4212 vdev->no_of_vpath = no_of_vpath;
4214 /* Virtual Path count */
4215 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4216 if (!vxge_bVALn(vpath_mask, i, 1))
4217 continue;
4218 if (j >= vdev->no_of_vpath)
4219 break;
4221 vdev->vpaths[j].is_configured = 1;
4222 vdev->vpaths[j].device_id = i;
4223 vdev->vpaths[j].ring.driver_id = j;
4224 vdev->vpaths[j].vdev = vdev;
4225 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4226 memcpy((u8 *)vdev->vpaths[j].macaddr,
4227 ll_config->device_hw_info.mac_addrs[i],
4228 ETH_ALEN);
4230 /* Initialize the mac address list header */
4231 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4233 vdev->vpaths[j].mac_addr_cnt = 0;
4234 vdev->vpaths[j].mcast_addr_cnt = 0;
4235 j++;
4236 }
4237 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4238 vdev->max_config_port = max_config_port;
4240 vdev->vlan_tag_strip = vlan_tag_strip;
4242 /* map the hashing selector table to the configured vpaths */
4243 for (i = 0; i < vdev->no_of_vpath; i++)
4244 vdev->vpath_selector[i] = vpath_selector[i];
4246 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4248 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4249 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4250 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4252 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4253 vdev->ndev->name, ll_config->device_hw_info.serial_number);
4255 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4256 vdev->ndev->name, ll_config->device_hw_info.part_number);
4258 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4259 vdev->ndev->name, ll_config->device_hw_info.product_desc);
4261 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4262 vdev->ndev->name, macaddr);
4264 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4265 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4267 vxge_debug_init(VXGE_TRACE,
4268 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4269 ll_config->device_hw_info.fw_version.version,
4270 ll_config->device_hw_info.fw_date.date);
4273 switch (ll_config->device_hw_info.function_mode) {
4274 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4275 vxge_debug_init(VXGE_TRACE,
4276 "%s: Single Function Mode Enabled", vdev->ndev->name);
4277 break;
4278 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4279 vxge_debug_init(VXGE_TRACE,
4280 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4281 break;
4282 case VXGE_HW_FUNCTION_MODE_SRIOV:
4283 vxge_debug_init(VXGE_TRACE,
4284 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4285 break;
4286 case VXGE_HW_FUNCTION_MODE_MRIOV:
4287 vxge_debug_init(VXGE_TRACE,
4288 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4289 break;
4290 }
4293 vxge_print_parm(vdev, vpath_mask);
4295 /* Store the fw version for the ethtool option */
4296 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4297 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4298 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4300 /* Copy the station mac address to the list */
4301 for (i = 0; i < vdev->no_of_vpath; i++) {
4302 entry = (struct vxge_mac_addrs *)
4303 kzalloc(sizeof(struct vxge_mac_addrs),
4305 if (NULL == entry) {
4306 vxge_debug_init(VXGE_ERR,
4307 "%s: mac_addr_list : memory allocation failed",
4308 vdev->ndev->name);
4309 ret = -EPERM;
4310 goto _exit5;
4311 }
4312 macaddr = (u8 *)&entry->macaddr;
4313 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4314 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4315 vdev->vpaths[i].mac_addr_cnt = 1;
4316 }
4318 kfree(device_config);
4320 /*
4321 * INTA is shared in multi-function mode. This is unlike the INTA
4322 * implementation in MR mode, where each VH has its own INTA message.
4323 * - INTA is masked (disabled) as long as at least one function sets
4324 * its TITAN_MASK_ALL_INT.ALARM bit.
4325 * - INTA is unmasked (enabled) when all enabled functions have cleared
4326 * their own TITAN_MASK_ALL_INT.ALARM bit.
4327 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4328 * Though this driver leaves the top level interrupts unmasked while
4329 * leaving the required module interrupt bits masked on exit, there
4330 * could be a rogue driver around that does not follow this procedure,
4331 * resulting in a failure to generate interrupts. The following code is
4332 * present to prevent such a failure.
4333 */
4335 if (ll_config->device_hw_info.function_mode ==
4336 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4337 if (vdev->config.intr_type == INTA)
4338 vxge_hw_device_unmask_all(hldev);
4340 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4341 vdev->ndev->name, __func__, __LINE__);
4343 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4344 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4345 vxge_hw_device_trace_level_get(hldev));
4347 return 0;
4349 _exit5:
4351 for (i = 0; i < vdev->no_of_vpath; i++)
4352 vxge_free_mac_add_list(&vdev->vpaths[i]);
4354 vxge_device_unregister(hldev);
4355 _exit4:
4356 pci_disable_sriov(pdev);
4357 vxge_hw_device_terminate(hldev);
4358 _exit3:
4359 iounmap(attr.bar0);
4360 _exit2:
4361 pci_release_regions(pdev);
4362 _exit1:
4363 pci_disable_device(pdev);
4364 _exit0:
4365 kfree(ll_config);
4366 kfree(device_config);
4367 driver_config->config_dev_cnt--;
4368 pci_set_drvdata(pdev, NULL);
4369 return ret;
4370 }
4372 /**
4373 * vxge_remove - Free the PCI device
4374 * @pdev: structure containing the PCI related information of the device.
4375 * Description: This function is called by the PCI subsystem to release a
4376 * PCI device and free up all resources held by the device.
4377 */
4378 static void __devexit
4379 vxge_remove(struct pci_dev *pdev)
4380 {
4381 struct __vxge_hw_device *hldev;
4382 struct vxgedev *vdev = NULL;
4383 struct net_device *dev;
4384 int i;
4385 #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4386 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4387 u32 level_trace;
4388 #endif
4390 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4392 if (hldev == NULL)
4393 return;
4394 dev = hldev->ndev;
4395 vdev = netdev_priv(dev);
4397 #if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4398 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4399 level_trace = vdev->level_trace;
4400 #endif
4401 vxge_debug_entryexit(level_trace,
4402 "%s:%d", __func__, __LINE__);
4404 vxge_debug_init(level_trace,
4405 "%s : removing PCI device...", __func__);
4406 vxge_device_unregister(hldev);
4408 for (i = 0; i < vdev->no_of_vpath; i++) {
4409 vxge_free_mac_add_list(&vdev->vpaths[i]);
4410 vdev->vpaths[i].mcast_addr_cnt = 0;
4411 vdev->vpaths[i].mac_addr_cnt = 0;
4414 kfree(vdev->vpaths);
4416 iounmap(vdev->bar0);
4418 pci_disable_sriov(pdev);
4420 /* we are safe to free it now */
4421 free_netdev(dev);
4423 vxge_debug_init(level_trace,
4424 "%s:%d Device unregistered", __func__, __LINE__);
4426 vxge_hw_device_terminate(hldev);
4428 pci_disable_device(pdev);
4429 pci_release_regions(pdev);
4430 pci_set_drvdata(pdev, NULL);
4431 vxge_debug_entryexit(level_trace,
4432 "%s:%d Exiting...", __func__, __LINE__);
4433 }
4435 static struct pci_error_handlers vxge_err_handler = {
4436 .error_detected = vxge_io_error_detected,
4437 .slot_reset = vxge_io_slot_reset,
4438 .resume = vxge_io_resume,
4439 };
4441 static struct pci_driver vxge_driver = {
4442 .name = VXGE_DRIVER_NAME,
4443 .id_table = vxge_id_table,
4444 .probe = vxge_probe,
4445 .remove = __devexit_p(vxge_remove),
4447 .suspend = vxge_pm_suspend,
4448 .resume = vxge_pm_resume,
4450 .err_handler = &vxge_err_handler,
4451 };
4453 static int __init
4454 vxge_starter(void)
4455 {
4456 int ret = 0;
4458 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4459 pr_info("Driver version: %s\n", DRV_VERSION);
4461 verify_bandwidth();
4463 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4464 if (!driver_config)
4465 return -ENOMEM;
4467 ret = pci_register_driver(&vxge_driver);
4469 if (driver_config->config_dev_cnt &&
4470 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4471 vxge_debug_init(VXGE_ERR,
4472 "%s: Configured %d of %d devices",
4473 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4474 driver_config->total_dev_cnt);
4476 if (ret)
4477 kfree(driver_config);
4479 return ret;
4480 }
4482 static void __exit
4483 vxge_closer(void)
4484 {
4485 pci_unregister_driver(&vxge_driver);
4486 kfree(driver_config);
4487 }
4488 module_init(vxge_starter);
4489 module_exit(vxge_closer);
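/*
 * Usage sketch (hypothetical values): the per-vpath bandwidth shares that
 * verify_bandwidth() validates are supplied as a module parameter at load
 * time, e.g.
 *   modprobe vxge bw_percentage=30,30,20
 * vxge_starter() runs verify_bandwidth() before registering the PCI
 * driver, so inconsistent settings quietly fall back to an equal split.
 */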