};
-static const struct pci_device_id bnx2x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
{ 0 },
};
}
-static void bnx2x_free_irq(struct bnx2x *bp)
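+/* When disable_only is true, only disable MSI/MSI-X at the PCI level;
+ * the IRQ handlers are left alone (error paths where none were requested).
+ */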
+static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
if (bp->flags & USING_MSIX_FLAG) {
- bnx2x_free_msix_irqs(bp);
+ if (!disable_only)
+ bnx2x_free_msix_irqs(bp);
pci_disable_msix(bp->pdev);
bp->flags &= ~USING_MSIX_FLAG;
} else if (bp->flags & USING_MSI_FLAG) {
- free_irq(bp->pdev->irq, bp->dev);
+ if (!disable_only)
+ free_irq(bp->pdev->irq, bp->dev);
pci_disable_msi(bp->pdev);
bp->flags &= ~USING_MSI_FLAG;
- } else
+ } else if (!disable_only)
free_irq(bp->pdev->irq, bp->dev);
}
rc = bnx2x_set_num_queues(bp);
- if (bnx2x_alloc_mem(bp))
+ if (bnx2x_alloc_mem(bp)) {
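+ /* undo the interrupt mode setup; no IRQ handlers were requested yet */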
+ bnx2x_free_irq(bp, true);
return -ENOMEM;
+ }
for_each_queue(bp, i)
bnx2x_fp(bp, i, disable_tpa) =
if (bp->flags & USING_MSIX_FLAG) {
rc = bnx2x_req_msix_irqs(bp);
if (rc) {
- pci_disable_msix(bp->pdev);
+ bnx2x_free_irq(bp, true);
goto load_error1;
}
} else {
rc = bnx2x_req_irq(bp);
if (rc) {
BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
- if (bp->flags & USING_MSI_FLAG)
- pci_disable_msi(bp->pdev);
+ bnx2x_free_irq(bp, true);
goto load_error1;
}
if (bp->flags & USING_MSI_FLAG) {
rc = bnx2x_init_hw(bp, load_code);
if (rc) {
BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
goto load_error2;
}
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
load_error1:
bnx2x_napi_disable(bp);
for_each_queue(bp, i)
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
/* Wait until tx fastpath tasks complete */
for_each_queue(bp, i) {
/* TPA requires Rx CSUM offloading */
if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
- if (!(dev->features & NETIF_F_LRO)) {
- dev->features |= NETIF_F_LRO;
- bp->flags |= TPA_ENABLE_FLAG;
- changed = 1;
- }
-
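+ /* TPA is globally disabled (disable_tpa set), so LRO cannot be turned on */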
+ if (!disable_tpa) {
+ if (!(dev->features & NETIF_F_LRO)) {
+ dev->features |= NETIF_F_LRO;
+ bp->flags |= TPA_ENABLE_FLAG;
+ changed = 1;
+ }
+ } else
+ rc = -EINVAL;
} else if (dev->features & NETIF_F_LRO) {
dev->features &= ~NETIF_F_LRO;
bp->flags &= ~TPA_ENABLE_FLAG;
config->hdr.length = 0;
if (CHIP_IS_E1(bp))
- config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
+ /* use last unicast entries */
+ config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
else
config->hdr.offset = BP_FUNC(bp);
config->hdr.client_id = bp->fp->cl_id;
rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) ||
- ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { /* some multicasts */
bnx2x_sp(bp, mcast_config);
for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
+ mclist && (i < netdev_mc_count(dev));
i++, mclist = mclist->next) {
config->config_table[i].
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
+ mclist && (i < netdev_mc_count(dev));
i++, mclist = mclist->next) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2x_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
};
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
if (CHIP_IS_E1(bp)) {
struct mac_configuration_cmd *config =