bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville...
authorDavid S. Miller <davem@davemloft.net>
Wed, 20 Oct 2010 08:59:48 +0000 (01:59 -0700)
committerDavid S. Miller <davem@davemloft.net>
Wed, 20 Oct 2010 08:59:48 +0000 (01:59 -0700)
98 files changed:
drivers/infiniband/hw/mlx4/Kconfig
drivers/net/3c523.c
drivers/net/3c527.c
drivers/net/Kconfig
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_reg.h
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/can/mcp251x.c
drivers/net/cxgb4vf/t4vf_common.h
drivers/net/dnet.c
drivers/net/e1000e/ethtool.c
drivers/net/igb/igb.h
drivers/net/igb/igb_ethtool.c
drivers/net/igb/igb_main.c
drivers/net/igbvf/netdev.c
drivers/net/netconsole.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_hw.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/niu.c
drivers/net/ns83820.c
drivers/net/pch_gbe/pch_gbe_ethtool.c
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/qlcnic/qlcnic_ethtool.c
drivers/net/r8169.c
drivers/net/stmmac/common.h
drivers/net/stmmac/dwmac100.h
drivers/net/stmmac/dwmac1000.h
drivers/net/stmmac/dwmac1000_core.c
drivers/net/stmmac/dwmac1000_dma.c
drivers/net/stmmac/dwmac100_core.c
drivers/net/stmmac/dwmac100_dma.c
drivers/net/stmmac/enh_desc.c
drivers/net/stmmac/norm_desc.c
drivers/net/stmmac/stmmac.h
drivers/net/stmmac/stmmac_ethtool.c
drivers/net/sundance.c
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/via-velocity.c
drivers/net/via-velocity.h
include/linux/can/platform/mcp251x.h
include/linux/netpoll.h
include/linux/skbuff.h
include/net/net_namespace.h
include/net/netns/xfrm.h
include/net/tipc/tipc.h
include/net/tipc/tipc_port.h
net/core/fib_rules.c
net/core/netpoll.c
net/core/skbuff.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_hash.c
net/ipv4/fib_lookup.h
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_timer.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_fib.c
net/phonet/pep.c
net/tipc/addr.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/cluster.c
net/tipc/cluster.h
net/tipc/config.c
net/tipc/config.h
net/tipc/core.c
net/tipc/core.h
net/tipc/dbg.c
net/tipc/dbg.h
net/tipc/discover.c
net/tipc/discover.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/name_distr.c
net/tipc/node.c
net/tipc/node.h
net/tipc/port.c
net/tipc/port.h
net/tipc/ref.c
net/tipc/ref.h
net/tipc/subscr.c
net/tipc/zone.c
net/tipc/zone.h

index 4175a4bd0c78841e8d9e121a53a4481069195ef6..bd995b2b50d8da143d721aa4d04683844fda410a 100644 (file)
@@ -1,5 +1,6 @@
 config MLX4_INFINIBAND
        tristate "Mellanox ConnectX HCA support"
+       depends on NETDEVICES && NETDEV_10000 && PCI
        select MLX4_CORE
        ---help---
          This driver provides low-level InfiniBand support for
index ca00f0a1121762b28c0c093c0735096b48ffdded..de579d0431697b400dadeab44cb5d7126795054b 100644 (file)
@@ -287,7 +287,7 @@ static int elmc_open(struct net_device *dev)
 
        elmc_id_attn586();      /* disable interrupts */
 
-       ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+       ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED,
                          dev->name, dev);
        if (ret) {
                pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
index 70705d1306b93e161260852d39f6adf6fe308226..0d6ca1e407d0d9534b616fe277a661d462118b1c 100644 (file)
@@ -443,7 +443,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
         *      Grab the IRQ
         */
 
-       err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
+       err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev);
        if (err) {
                release_region(dev->base_addr, MC32_IO_EXTENT);
                pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
index 13d01f358f341ad60d6a3e2421c42c4b5ab1e0ed..d24f54b8c19a3b73ee5d526eddf2169fa7c2db80 100644 (file)
@@ -177,6 +177,13 @@ config NET_SB1000
 
 source "drivers/net/arcnet/Kconfig"
 
+config MII
+       tristate "Generic Media Independent Interface device support"
+       help
+         Most ethernet controllers have MII transceiver either as an external
+         or internal device.  It is safe to say Y or M here even if your
+         ethernet card lacks MII.
+
 source "drivers/net/phy/Kconfig"
 
 #
@@ -212,13 +219,6 @@ menuconfig NET_ETHERNET
 
 if NET_ETHERNET
 
-config MII
-       tristate "Generic Media Independent Interface device support"
-       help
-         Most ethernet controllers have MII transceiver either as an external
-         or internal device.  It is safe to say Y or M here even if your
-         ethernet card lack MII.
-
 config MACB
        tristate "Atmel MACB support"
        depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91SAM9G45 || ARCH_AT91CAP9
index c49b643e009b89430086f3e041a77a0cd3766edc..3bf236b160dd3efac3aa644c6eeae56066a6714b 100644 (file)
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.60.00-1"
-#define DRV_MODULE_RELDATE      "2010/10/06"
+#define DRV_MODULE_VERSION      "1.60.00-3"
+#define DRV_MODULE_RELDATE      "2010/10/19"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -1180,15 +1180,10 @@ struct bnx2x {
        TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 
 /* func init flags */
-#define FUNC_FLG_RSS           0x0001
-#define FUNC_FLG_STATS         0x0002
-/* removed  FUNC_FLG_UNMATCHED 0x0004 */
-#define FUNC_FLG_TPA           0x0008
-#define FUNC_FLG_SPQ           0x0010
-#define FUNC_FLG_LEADING       0x0020  /* PF only */
-
-#define FUNC_CONFIG(flgs)      ((flgs) & (FUNC_FLG_RSS | FUNC_FLG_TPA | \
-                                       FUNC_FLG_LEADING))
+#define FUNC_FLG_STATS         0x0001
+#define FUNC_FLG_TPA           0x0002
+#define FUNC_FLG_SPQ           0x0004
+#define FUNC_FLG_LEADING       0x0008  /* PF only */
 
 struct rxq_pause_params {
        u16             bd_th_lo;
index 97ef674dcc3471252ce105899afe5f50b170e829..1966ceeefcd4980d1ff11b25e780348c7244cdda 100644 (file)
@@ -507,8 +507,11 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;
 
-                       /* If CQE is marked both TPA_START and TPA_END
-                          it is a non-TPA CQE */
+                       /* - If CQE is marked both TPA_START and TPA_END it is
+                        *   a non-TPA CQE.
+                        * - FP CQE will always have either TPA_START or/and
+                        *   TPA_STOP flags set.
+                        */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
@@ -526,9 +529,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                                        bnx2x_set_skb_rxhash(bp, cqe, skb);
 
                                        goto next_rx;
-                               }
-
-                               if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
+                               } else { /* TPA_STOP */
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);
@@ -830,7 +831,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
        int i, j;
 
        bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-               BNX2X_FW_IP_HDR_ALIGN_PAD;
+               IP_HEADER_ALIGNMENT_PADDING;
 
        DP(NETIF_MSG_IFUP,
           "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
@@ -1288,8 +1289,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        if (rc) {
                BNX2X_ERR("HW init failed, aborting\n");
                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
                goto load_error2;
        }
 
@@ -1522,6 +1521,12 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
 {
        u16 pmcsr;
 
+       /* If there is no power capability, silently succeed */
+       if (!bp->pm_cap) {
+               DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+               return 0;
+       }
+
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
 
        switch (state) {
index 7f52cec9bb99d82036fa1594c823c9026812443d..5bfe0ab1d2d4d71cf1f801149f40de07b5f9ffad 100644 (file)
@@ -1032,6 +1032,4 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
-#define BNX2X_FW_IP_HDR_ALIGN_PAD      2 /* FW places hdr with this padding */
-
 #endif /* BNX2X_CMN_H */
index ead524bca8f2893d3c4c8bd884a93a3b9af5edee..f22e283cabef848a163dab1e43defe571178fa72 100644 (file)
@@ -1111,14 +1111,19 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 
-               DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
-                  val, port, addr);
+               if (!CHIP_IS_E1(bp)) {
+                       DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+                          val, port, addr);
 
-               REG_WR(bp, addr, val);
+                       REG_WR(bp, addr, val);
 
-               val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+                       val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+               }
        }
 
+       if (CHIP_IS_E1(bp))
+               REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
+
        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 
@@ -1212,10 +1217,26 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
 
-       val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
-                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
-                HC_CONFIG_0_REG_INT_LINE_EN_0 |
-                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+       /*
+        * in E1 we must use only PCI configuration space to disable
+        * MSI/MSIX capablility
+        * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
+        */
+       if (CHIP_IS_E1(bp)) {
+               /*  Since IGU_PF_CONF_MSI_MSIX_EN is still always on
+                *  Use mask register to prevent from HC sending interrupts
+                *  after we exit the function
+                */
+               REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+               val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
+                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+       } else
+               val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
+                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 
        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);
@@ -2284,35 +2305,31 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
 
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
-       if (FUNC_CONFIG(p->func_flgs)) {
-               struct tstorm_eth_function_common_config tcfg = {0};
-
-               /* tpa */
-               if (p->func_flgs & FUNC_FLG_TPA)
-                       tcfg.config_flags |=
-                       TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
+       struct tstorm_eth_function_common_config tcfg = {0};
+       u16 rss_flgs;
 
-               /* set rss flags */
-               if (p->func_flgs & FUNC_FLG_RSS) {
-                       u16 rss_flgs = (p->rss->mode <<
-                       TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
+       /* tpa */
+       if (p->func_flgs & FUNC_FLG_TPA)
+               tcfg.config_flags |=
+               TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
 
-                       if (p->rss->cap & RSS_IPV4_CAP)
-                               rss_flgs |= RSS_IPV4_CAP_MASK;
-                       if (p->rss->cap & RSS_IPV4_TCP_CAP)
-                               rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
-                       if (p->rss->cap & RSS_IPV6_CAP)
-                               rss_flgs |= RSS_IPV6_CAP_MASK;
-                       if (p->rss->cap & RSS_IPV6_TCP_CAP)
-                               rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
+       /* set rss flags */
+       rss_flgs = (p->rss->mode <<
+               TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
 
-                       tcfg.config_flags |= rss_flgs;
-                       tcfg.rss_result_mask = p->rss->result_mask;
+       if (p->rss->cap & RSS_IPV4_CAP)
+               rss_flgs |= RSS_IPV4_CAP_MASK;
+       if (p->rss->cap & RSS_IPV4_TCP_CAP)
+               rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
+       if (p->rss->cap & RSS_IPV6_CAP)
+               rss_flgs |= RSS_IPV6_CAP_MASK;
+       if (p->rss->cap & RSS_IPV6_TCP_CAP)
+               rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
 
-               }
+       tcfg.config_flags |= rss_flgs;
+       tcfg.rss_result_mask = p->rss->result_mask;
 
-               storm_memset_func_cfg(bp, &tcfg, p->func_id);
-       }
+       storm_memset_func_cfg(bp, &tcfg, p->func_id);
 
        /* Enable the function in the FW */
        storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
@@ -2479,23 +2496,17 @@ void bnx2x_pf_init(struct bnx2x *bp)
        else
                flags |= FUNC_FLG_TPA;
 
+       /* function setup */
+
        /**
         * Although RSS is meaningless when there is a single HW queue we
         * still need it enabled in order to have HW Rx hash generated.
-        *
-        * if (is_eth_multi(bp))
-        *      flags |= FUNC_FLG_RSS;
         */
-       flags |= FUNC_FLG_RSS;
-
-       /* function setup */
-       if (flags & FUNC_FLG_RSS) {
-               rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
-                          RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
-               rss.mode = bp->multi_mode;
-               rss.result_mask = MULTI_MASK;
-               func_init.rss = &rss;
-       }
+       rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
+                  RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
+       rss.mode = bp->multi_mode;
+       rss.result_mask = MULTI_MASK;
+       func_init.rss = &rss;
 
        func_init.func_flgs = flags;
        func_init.pf_id = BP_FUNC(bp);
@@ -5446,7 +5457,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
        struct bnx2x_ilt *ilt = BP_ILT(bp);
        u16 cdu_ilt_start;
        u32 addr, val;
-       int i;
+       u32 main_mem_base, main_mem_size, main_mem_prty_clr;
+       int i, main_mem_width;
 
        DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
 
@@ -5695,6 +5707,31 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
        bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
 
+       if (CHIP_IS_E1x(bp)) {
+               main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
+               main_mem_base = HC_REG_MAIN_MEMORY +
+                               BP_PORT(bp) * (main_mem_size * 4);
+               main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
+               main_mem_width = 8;
+
+               val = REG_RD(bp, main_mem_prty_clr);
+               if (val)
+                       DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
+                                         "block during "
+                                         "function init (0x%x)!\n", val);
+
+               /* Clear "false" parity errors in MSI-X table */
+               for (i = main_mem_base;
+                    i < main_mem_base + main_mem_size * 4;
+                    i += main_mem_width) {
+                       bnx2x_read_dmae(bp, i, main_mem_width / 4);
+                       bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
+                                        i, main_mem_width / 4);
+               }
+               /* Clear HC parity attention */
+               REG_RD(bp, main_mem_prty_clr);
+       }
+
        bnx2x_phy_probe(&bp->link_params);
 
        return 0;
index 18a86284ebcc4b4b32ceb0f275c5b2792e51152d..1cefe489a9553b62ff6b8b2618efebf9b440ba4e 100644 (file)
 #define HC_REG_HC_PRTY_MASK                                     0x1080a0
 /* [R 3] Parity register #0 read */
 #define HC_REG_HC_PRTY_STS                                      0x108094
-#define HC_REG_INT_MASK                                         0x108108
+/* [RC 3] Parity register #0 read clear */
+#define HC_REG_HC_PRTY_STS_CLR                                  0x108098
+#define HC_REG_INT_MASK                                                 0x108108
 #define HC_REG_LEADING_EDGE_0                                   0x108040
 #define HC_REG_LEADING_EDGE_1                                   0x108048
+#define HC_REG_MAIN_MEMORY                                      0x108800
+#define HC_REG_MAIN_MEMORY_SIZE                                         152
 #define HC_REG_P0_PROD_CONS                                     0x108200
 #define HC_REG_P1_PROD_CONS                                     0x108400
 #define HC_REG_PBA_COMMAND                                      0x108140
index 7703d35de65d6979fba486d6f238f8322bc65d06..6b9a7bd8ec14363226c312b1642180956d2c071f 100644 (file)
@@ -76,6 +76,7 @@
 #include <linux/if_vlan.h>
 #include <linux/if_bonding.h>
 #include <linux/jiffies.h>
+#include <linux/preempt.h>
 #include <net/route.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
@@ -169,6 +170,10 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
 
 /*----------------------------- Global variables ----------------------------*/
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+cpumask_var_t netpoll_block_tx;
+#endif
+
 static const char * const version =
        DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
 
@@ -179,9 +184,6 @@ static int arp_ip_count;
 static int bond_mode   = BOND_MODE_ROUNDROBIN;
 static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
 static int lacp_fast;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static int disable_netpoll = 1;
-#endif
 
 const struct bond_parm_tbl bond_lacp_tbl[] = {
 {      "slow",         AD_LACP_SLOW},
@@ -310,6 +312,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
 
        pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
 
+       block_netpoll_tx();
        write_lock_bh(&bond->lock);
 
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
@@ -344,6 +347,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
 
 out:
        write_unlock_bh(&bond->lock);
+       unblock_netpoll_tx();
        return res;
 }
 
@@ -449,11 +453,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
        if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
                struct netpoll *np = bond->dev->npinfo->netpoll;
                slave_dev->npinfo = bond->dev->npinfo;
-               np->real_dev = np->dev = skb->dev;
                slave_dev->priv_flags |= IFF_IN_NETPOLL;
-               netpoll_send_skb(np, skb);
+               netpoll_send_skb_on_dev(np, skb, slave_dev);
                slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
-               np->dev = bond->dev;
        } else
 #endif
                dev_queue_xmit(skb);
@@ -1332,9 +1334,14 @@ static bool slaves_support_netpoll(struct net_device *bond_dev)
 
 static void bond_poll_controller(struct net_device *bond_dev)
 {
-       struct net_device *dev = bond_dev->npinfo->netpoll->real_dev;
-       if (dev != bond_dev)
-               netpoll_poll_dev(dev);
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave;
+       int i;
+
+       bond_for_each_slave(bond, slave, i) {
+               if (slave->dev && IS_UP(slave->dev))
+                       netpoll_poll_dev(slave->dev);
+       }
 }
 
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
@@ -1801,23 +1808,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        bond_set_carrier(bond);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       /*
-        * Netpoll and bonding is broken, make sure it is not initialized
-        * until it is fixed.
-        */
-       if (disable_netpoll) {
+       if (slaves_support_netpoll(bond_dev)) {
+               bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+               if (bond_dev->npinfo)
+                       slave_dev->npinfo = bond_dev->npinfo;
+       } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-       } else {
-               if (slaves_support_netpoll(bond_dev)) {
-                       bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-                       if (bond_dev->npinfo)
-                               slave_dev->npinfo = bond_dev->npinfo;
-               } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
-                       bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-                       pr_info("New slave device %s does not support netpoll\n",
-                               slave_dev->name);
-                       pr_info("Disabling netpoll support for %s\n", bond_dev->name);
-               }
+               pr_info("New slave device %s does not support netpoll\n",
+                       slave_dev->name);
+               pr_info("Disabling netpoll support for %s\n", bond_dev->name);
        }
 #endif
        read_unlock(&bond->lock);
@@ -1889,6 +1888,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
                return -EINVAL;
        }
 
+       block_netpoll_tx();
        netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
        write_lock_bh(&bond->lock);
 
@@ -1898,6 +1898,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
                pr_info("%s: %s not enslaved\n",
                        bond_dev->name, slave_dev->name);
                write_unlock_bh(&bond->lock);
+               unblock_netpoll_tx();
                return -EINVAL;
        }
 
@@ -1991,6 +1992,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        write_unlock_bh(&bond->lock);
+       unblock_netpoll_tx();
 
        /* must do this from outside any spinlocks */
        bond_destroy_slave_symlinks(bond_dev, slave_dev);
@@ -2021,10 +2023,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 #ifdef CONFIG_NET_POLL_CONTROLLER
        read_lock_bh(&bond->lock);
 
-        /* Make sure netpoll over stays disabled until fixed. */
-       if (!disable_netpoll)
-               if (slaves_support_netpoll(bond_dev))
-                               bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+       if (slaves_support_netpoll(bond_dev))
+               bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
        read_unlock_bh(&bond->lock);
        if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
                slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
@@ -2180,7 +2180,6 @@ static int bond_release_all(struct net_device *bond_dev)
 
 out:
        write_unlock_bh(&bond->lock);
-
        return 0;
 }
 
@@ -2229,9 +2228,11 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
            (old_active) &&
            (new_active->link == BOND_LINK_UP) &&
            IS_UP(new_active->dev)) {
+               block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
                bond_change_active_slave(bond, new_active);
                write_unlock_bh(&bond->curr_slave_lock);
+               unblock_netpoll_tx();
        } else
                res = -EINVAL;
 
@@ -2463,9 +2464,11 @@ static void bond_miimon_commit(struct bonding *bond)
 
 do_failover:
                ASSERT_RTNL();
+               block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
                write_unlock_bh(&bond->curr_slave_lock);
+               unblock_netpoll_tx();
        }
 
        bond_set_carrier(bond);
@@ -2908,11 +2911,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
        }
 
        if (do_failover) {
+               block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
 
                bond_select_active_slave(bond);
 
                write_unlock_bh(&bond->curr_slave_lock);
+               unblock_netpoll_tx();
        }
 
 re_arm:
@@ -3071,9 +3076,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
 
 do_failover:
                ASSERT_RTNL();
+               block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
                write_unlock_bh(&bond->curr_slave_lock);
+               unblock_netpoll_tx();
        }
 
        bond_set_carrier(bond);
@@ -4561,6 +4568,13 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct bonding *bond = netdev_priv(dev);
 
+       /*
+        * If we risk deadlock from transmitting this in the
+        * netpoll path, tell netpoll to queue the frame for later tx
+        */
+       if (is_netpoll_tx_blocked(dev))
+               return NETDEV_TX_BUSY;
+
        if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
                if (!bond_slave_override(bond, skb))
                        return NETDEV_TX_OK;
@@ -5283,6 +5297,13 @@ static int __init bonding_init(void)
        if (res)
                goto out;
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) {
+               res = -ENOMEM;
+               goto out;
+       }
+#endif
+
        res = register_pernet_subsys(&bond_net_ops);
        if (res)
                goto out;
@@ -5301,6 +5322,7 @@ static int __init bonding_init(void)
        if (res)
                goto err;
 
+
        register_netdevice_notifier(&bond_netdev_notifier);
        register_inetaddr_notifier(&bond_inetaddr_notifier);
        bond_register_ipv6_notifier();
@@ -5310,6 +5332,9 @@ err:
        rtnl_link_unregister(&bond_link_ops);
 err_link:
        unregister_pernet_subsys(&bond_net_ops);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       free_cpumask_var(netpoll_block_tx);
+#endif
        goto out;
 
 }
@@ -5324,6 +5349,10 @@ static void __exit bonding_exit(void)
 
        rtnl_link_unregister(&bond_link_ops);
        unregister_pernet_subsys(&bond_net_ops);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       free_cpumask_var(netpoll_block_tx);
+#endif
 }
 
 module_init(bonding_init);
index 01b4c3f5d9e743d29101fca7668853ec23a3aecf..8fd0174c5380499f5a87178ca2a6024c4cc124e9 100644 (file)
@@ -1066,6 +1066,7 @@ static ssize_t bonding_store_primary(struct device *d,
 
        if (!rtnl_trylock())
                return restart_syscall();
+       block_netpoll_tx();
        read_lock(&bond->lock);
        write_lock_bh(&bond->curr_slave_lock);
 
@@ -1101,6 +1102,7 @@ static ssize_t bonding_store_primary(struct device *d,
 out:
        write_unlock_bh(&bond->curr_slave_lock);
        read_unlock(&bond->lock);
+       unblock_netpoll_tx();
        rtnl_unlock();
 
        return count;
@@ -1146,11 +1148,13 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
                bond->dev->name, pri_reselect_tbl[new_value].modename,
                new_value);
 
+       block_netpoll_tx();
        read_lock(&bond->lock);
        write_lock_bh(&bond->curr_slave_lock);
        bond_select_active_slave(bond);
        write_unlock_bh(&bond->curr_slave_lock);
        read_unlock(&bond->lock);
+       unblock_netpoll_tx();
 out:
        rtnl_unlock();
        return ret;
@@ -1232,6 +1236,8 @@ static ssize_t bonding_store_active_slave(struct device *d,
 
        if (!rtnl_trylock())
                return restart_syscall();
+
+       block_netpoll_tx();
        read_lock(&bond->lock);
        write_lock_bh(&bond->curr_slave_lock);
 
@@ -1288,6 +1294,8 @@ static ssize_t bonding_store_active_slave(struct device *d,
  out:
        write_unlock_bh(&bond->curr_slave_lock);
        read_unlock(&bond->lock);
+       unblock_netpoll_tx();
+
        rtnl_unlock();
 
        return count;
index c15f21347486586322c95c36316a88d2669d76ac..2c12a5f812f4b015d260127d4606234559832742 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/proc_fs.h>
 #include <linux/if_bonding.h>
 #include <linux/kobject.h>
+#include <linux/cpumask.h>
 #include <linux/in6.h>
 #include "bond_3ad.h"
 #include "bond_alb.h"
                bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave)
 
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern cpumask_var_t netpoll_block_tx;
+
+static inline void block_netpoll_tx(void)
+{
+       preempt_disable();
+       BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(),
+                                       netpoll_block_tx));
+}
+
+static inline void unblock_netpoll_tx(void)
+{
+       BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(),
+                                          netpoll_block_tx));
+       preempt_enable();
+}
+
+static inline int is_netpoll_tx_blocked(struct net_device *dev)
+{
+       if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
+               return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx);
+       return 0;
+}
+#else
+#define block_netpoll_tx()
+#define unblock_netpoll_tx()
+#define is_netpoll_tx_blocked(dev) (0)
+#endif
+
 struct bond_params {
        int mode;
        int xmit_policy;
index b11a0cb5ed81eb9ffabaa067dcdc544edabadc22..c664be261e98b1bcb93eda8bea211ecb574adbfb 100644 (file)
  * static struct mcp251x_platform_data mcp251x_info = {
  *         .oscillator_frequency = 8000000,
  *         .board_specific_setup = &mcp251x_setup,
- *         .model = CAN_MCP251X_MCP2510,
  *         .power_enable = mcp251x_power_enable,
  *         .transceiver_enable = NULL,
  * };
  *
  * static struct spi_board_info spi_board_info[] = {
  *         {
- *                 .modalias = "mcp251x",
+ *                 .modalias = "mcp2510",
+ *                     // or "mcp2515" depending on your controller
  *                 .platform_data = &mcp251x_info,
  *                 .irq = IRQ_EINT13,
  *                 .max_speed_hz = 2*1000*1000,
 #  define CANINTF_TX0IF 0x04
 #  define CANINTF_RX1IF 0x02
 #  define CANINTF_RX0IF 0x01
+#  define CANINTF_ERR_TX \
+       (CANINTF_ERRIF | CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)
 #define EFLG         0x2d
 #  define EFLG_EWARN   0x01
 #  define EFLG_RXWAR   0x02
@@ -222,10 +224,16 @@ static struct can_bittiming_const mcp251x_bittiming_const = {
        .brp_inc = 1,
 };
 
+enum mcp251x_model {
+       CAN_MCP251X_MCP2510     = 0x2510,
+       CAN_MCP251X_MCP2515     = 0x2515,
+};
+
 struct mcp251x_priv {
        struct can_priv    can;
        struct net_device *net;
        struct spi_device *spi;
+       enum mcp251x_model model;
 
        struct mutex mcp_lock; /* SPI device lock */
 
@@ -250,6 +258,16 @@ struct mcp251x_priv {
        int restart_tx;
 };
 
+#define MCP251X_IS(_model) \
+static inline int mcp251x_is_##_model(struct spi_device *spi) \
+{ \
+       struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); \
+       return priv->model == CAN_MCP251X_MCP##_model; \
+}
+
+MCP251X_IS(2510);
+MCP251X_IS(2515);
+
 static void mcp251x_clean(struct net_device *net)
 {
        struct mcp251x_priv *priv = netdev_priv(net);
@@ -319,6 +337,20 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
        return val;
 }
 
+static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
+               uint8_t *v1, uint8_t *v2)
+{
+       struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+
+       priv->spi_tx_buf[0] = INSTRUCTION_READ;
+       priv->spi_tx_buf[1] = reg;
+
+       mcp251x_spi_trans(spi, 4);
+
+       *v1 = priv->spi_rx_buf[2];
+       *v2 = priv->spi_rx_buf[3];
+}
+
 static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
 {
        struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
@@ -346,10 +378,9 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
 static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
                                int len, int tx_buf_idx)
 {
-       struct mcp251x_platform_data *pdata = spi->dev.platform_data;
        struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
 
-       if (pdata->model == CAN_MCP251X_MCP2510) {
+       if (mcp251x_is_2510(spi)) {
                int i;
 
                for (i = 1; i < TXBDAT_OFF + len; i++)
@@ -392,9 +423,8 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
                                int buf_idx)
 {
        struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
-       struct mcp251x_platform_data *pdata = spi->dev.platform_data;
 
-       if (pdata->model == CAN_MCP251X_MCP2510) {
+       if (mcp251x_is_2510(spi)) {
                int i, len;
 
                for (i = 1; i < RXBDAT_OFF; i++)
@@ -451,7 +481,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
 
        priv->net->stats.rx_packets++;
        priv->net->stats.rx_bytes += frame->can_dlc;
-       netif_rx(skb);
+       netif_rx_ni(skb);
 }
 
 static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -676,7 +706,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
        if (skb) {
                frame->can_id = can_id;
                frame->data[1] = data1;
-               netif_rx(skb);
+               netif_rx_ni(skb);
        } else {
                dev_err(&net->dev,
                        "cannot allocate error skb\n");
@@ -754,24 +784,39 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
        mutex_lock(&priv->mcp_lock);
        while (!priv->force_quit) {
                enum can_state new_state;
-               u8 intf = mcp251x_read_reg(spi, CANINTF);
-               u8 eflag;
+               u8 intf, eflag;
+               u8 clear_intf = 0;
                int can_id = 0, data1 = 0;
 
+               mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
+
+               /* receive buffer 0 */
                if (intf & CANINTF_RX0IF) {
                        mcp251x_hw_rx(spi, 0);
-                       /* Free one buffer ASAP */
-                       mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF,
-                                          0x00);
+                       /*
+                        * Free one buffer ASAP
+                        * (The MCP2515 does this automatically.)
+                        */
+                       if (mcp251x_is_2510(spi))
+                               mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
                }
 
-               if (intf & CANINTF_RX1IF)
+               /* receive buffer 1 */
+               if (intf & CANINTF_RX1IF) {
                        mcp251x_hw_rx(spi, 1);
+                       /* the MCP2515 does this automatically */
+                       if (mcp251x_is_2510(spi))
+                               clear_intf |= CANINTF_RX1IF;
+               }
 
-               mcp251x_write_bits(spi, CANINTF, intf, 0x00);
+               /* any error or tx interrupt we need to clear? */
+               if (intf & CANINTF_ERR_TX)
+                       clear_intf |= intf & CANINTF_ERR_TX;
+               if (clear_intf)
+                       mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
 
-               eflag = mcp251x_read_reg(spi, EFLG);
-               mcp251x_write_reg(spi, EFLG, 0x00);
+               if (eflag)
+                       mcp251x_write_bits(spi, EFLG, eflag, 0x00);
 
                /* Update can state */
                if (eflag & EFLG_TXBO) {
@@ -816,10 +861,14 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
                if (intf & CANINTF_ERRIF) {
                        /* Handle overflow counters */
                        if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
-                               if (eflag & EFLG_RX0OVR)
+                               if (eflag & EFLG_RX0OVR) {
                                        net->stats.rx_over_errors++;
-                               if (eflag & EFLG_RX1OVR)
+                                       net->stats.rx_errors++;
+                               }
+                               if (eflag & EFLG_RX1OVR) {
                                        net->stats.rx_over_errors++;
+                                       net->stats.rx_errors++;
+                               }
                                can_id |= CAN_ERR_CRTL;
                                data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
                        }
@@ -921,16 +970,12 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
        struct net_device *net;
        struct mcp251x_priv *priv;
        struct mcp251x_platform_data *pdata = spi->dev.platform_data;
-       int model = spi_get_device_id(spi)->driver_data;
        int ret = -ENODEV;
 
        if (!pdata)
                /* Platform data is required for osc freq */
                goto error_out;
 
-       if (model)
-               pdata->model = model;
-
        /* Allocate can/net device */
        net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
        if (!net) {
@@ -947,6 +992,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
        priv->can.clock.freq = pdata->oscillator_frequency / 2;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
                CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
+       priv->model = spi_get_device_id(spi)->driver_data;
        priv->net = net;
        dev_set_drvdata(&spi->dev, priv);
 
@@ -1120,8 +1166,7 @@ static int mcp251x_can_resume(struct spi_device *spi)
 #define mcp251x_can_resume NULL
 #endif
 
-static struct spi_device_id mcp251x_id_table[] = {
-       { "mcp251x",    0 /* Use pdata.model */ },
+static const struct spi_device_id mcp251x_id_table[] = {
        { "mcp2510",    CAN_MCP251X_MCP2510 },
        { "mcp2515",    CAN_MCP251X_MCP2515 },
        { },
index 5c7bde7f9baeb18268c80f04e63cdcf951fe328c..873cb7d86c57fc50ae3c71d6638c5e2b58e2ca07 100644 (file)
@@ -132,15 +132,15 @@ struct rss_params {
        unsigned int mode;              /* RSS mode */
        union {
            struct {
-               int synmapen:1;         /* SYN Map Enable */
-               int syn4tupenipv6:1;    /* enable hashing 4-tuple IPv6 SYNs */
-               int syn2tupenipv6:1;    /* enable hashing 2-tuple IPv6 SYNs */
-               int syn4tupenipv4:1;    /* enable hashing 4-tuple IPv4 SYNs */
-               int syn2tupenipv4:1;    /* enable hashing 2-tuple IPv4 SYNs */
-               int ofdmapen:1;         /* Offload Map Enable */
-               int tnlmapen:1;         /* Tunnel Map Enable */
-               int tnlalllookup:1;     /* Tunnel All Lookup */
-               int hashtoeplitz:1;     /* use Toeplitz hash */
+               unsigned int synmapen:1;        /* SYN Map Enable */
+               unsigned int syn4tupenipv6:1;   /* enable hashing 4-tuple IPv6 SYNs */
+               unsigned int syn2tupenipv6:1;   /* enable hashing 2-tuple IPv6 SYNs */
+               unsigned int syn4tupenipv4:1;   /* enable hashing 4-tuple IPv4 SYNs */
+               unsigned int syn2tupenipv4:1;   /* enable hashing 2-tuple IPv4 SYNs */
+               unsigned int ofdmapen:1;        /* Offload Map Enable */
+               unsigned int tnlmapen:1;        /* Tunnel Map Enable */
+               unsigned int tnlalllookup:1;    /* Tunnel All Lookup */
+               unsigned int hashtoeplitz:1;    /* use Toeplitz hash */
            } basicvirtual;
        } u;
 };
@@ -151,10 +151,10 @@ struct rss_params {
 union rss_vi_config {
     struct {
        u16 defaultq;                   /* Ingress Queue ID for !tnlalllookup */
-       int ip6fourtupen:1;             /* hash 4-tuple IPv6 ingress packets */
-       int ip6twotupen:1;              /* hash 2-tuple IPv6 ingress packets */
-       int ip4fourtupen:1;             /* hash 4-tuple IPv4 ingress packets */
-       int ip4twotupen:1;              /* hash 2-tuple IPv4 ingress packets */
+       unsigned int ip6fourtupen:1;    /* hash 4-tuple IPv6 ingress packets */
+       unsigned int ip6twotupen:1;     /* hash 2-tuple IPv6 ingress packets */
+       unsigned int ip4fourtupen:1;    /* hash 4-tuple IPv4 ingress packets */
+       unsigned int ip4twotupen:1;     /* hash 2-tuple IPv4 ingress packets */
        int udpen;                      /* hash 4-tuple UDP ingress packets */
     } basicvirtual;
 };
index 7c075756611ad9d20e752a99105ead383524d342..9d8a20b72fa9ecdaa694481f1c4151a5dd51ce3d 100644 (file)
@@ -27,7 +27,7 @@
 #undef DEBUG
 
 /* function for reading internal MAC register */
-u16 dnet_readw_mac(struct dnet *bp, u16 reg)
+static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
 {
        u16 data_read;
 
@@ -46,7 +46,7 @@ u16 dnet_readw_mac(struct dnet *bp, u16 reg)
 }
 
 /* function for writing internal MAC register */
-void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
+static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
 {
        /* load data to write */
        dnet_writel(bp, val, MACREG_DATA);
@@ -63,11 +63,11 @@ static void __dnet_set_hwaddr(struct dnet *bp)
 {
        u16 tmp;
 
-       tmp = cpu_to_be16(*((u16 *) bp->dev->dev_addr));
+       tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
        dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
-       tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 2)));
+       tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
        dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
-       tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 4)));
+       tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
        dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
 }
 
@@ -89,11 +89,11 @@ static void __devinit dnet_get_hwaddr(struct dnet *bp)
         * Mac_addr[15:0]).
         */
        tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
-       *((u16 *) addr) = be16_to_cpu(tmp);
+       *((__be16 *)addr) = cpu_to_be16(tmp);
        tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
-       *((u16 *) (addr + 2)) = be16_to_cpu(tmp);
+       *((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
        tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
-       *((u16 *) (addr + 4)) = be16_to_cpu(tmp);
+       *((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
 
        if (is_valid_ether_addr(addr))
                memcpy(bp->dev->dev_addr, addr, sizeof(addr));
@@ -361,7 +361,7 @@ err_out:
 }
 
 /* For Neptune board: LINK1000 as Link LED and TX as activity LED */
-int dnet_phy_marvell_fixup(struct phy_device *phydev)
+static int dnet_phy_marvell_fixup(struct phy_device *phydev)
 {
        return phy_write(phydev, 0x18, 0x4148);
 }
index b7f15b3f0e0332c59f6ecfaab012cd51084ed4a9..8984d165a39b8bab735d718b363c9e7e97f48dd2 100644 (file)
@@ -1717,13 +1717,6 @@ static void e1000_diag_test(struct net_device *netdev,
 
                e_info("offline testing starting\n");
 
-               /*
-                * Link test performed before hardware reset so autoneg doesn't
-                * interfere with test result
-                */
-               if (e1000_link_test(adapter, &data[4]))
-                       eth_test->flags |= ETH_TEST_FL_FAILED;
-
                if (if_running)
                        /* indicate we're in test mode */
                        dev_close(netdev);
@@ -1747,15 +1740,19 @@ static void e1000_diag_test(struct net_device *netdev,
                if (e1000_loopback_test(adapter, &data[3]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
+               /* force this routine to wait until autoneg complete/timeout */
+               adapter->hw.phy.autoneg_wait_to_complete = 1;
+               e1000e_reset(adapter);
+               adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+               if (e1000_link_test(adapter, &data[4]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
                /* restore speed, duplex, autoneg settings */
                adapter->hw.phy.autoneg_advertised = autoneg_advertised;
                adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
                adapter->hw.mac.autoneg = autoneg;
-
-               /* force this routine to wait until autoneg complete/timeout */
-               adapter->hw.phy.autoneg_wait_to_complete = 1;
                e1000e_reset(adapter);
-               adapter->hw.phy.autoneg_wait_to_complete = 0;
 
                clear_bit(__E1000_TESTING, &adapter->state);
                if (if_running)
index 44e0ff1494e08f37395d18a4827ea31db06935aa..edab9c442399448bc6f930d5cae1a3a7e75ffd61 100644 (file)
@@ -159,6 +159,7 @@ struct igb_tx_queue_stats {
        u64 packets;
        u64 bytes;
        u64 restart_queue;
+       u64 restart_queue2;
 };
 
 struct igb_rx_queue_stats {
@@ -210,11 +211,14 @@ struct igb_ring {
                /* TX */
                struct {
                        struct igb_tx_queue_stats tx_stats;
+                       struct u64_stats_sync tx_syncp;
+                       struct u64_stats_sync tx_syncp2;
                        bool detect_tx_hung;
                };
                /* RX */
                struct {
                        struct igb_rx_queue_stats rx_stats;
+                       struct u64_stats_sync rx_syncp;
                        u32 rx_buffer_len;
                };
        };
@@ -288,6 +292,9 @@ struct igb_adapter {
        struct timecompare compare;
        struct hwtstamp_config hwtstamp_config;
 
+       spinlock_t stats64_lock;
+       struct rtnl_link_stats64 stats64;
+
        /* structs defined in e1000_hw.h */
        struct e1000_hw hw;
        struct e1000_hw_stats stats;
@@ -357,7 +364,7 @@ extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
 extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
                                           struct igb_buffer *);
 extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
-extern void igb_update_stats(struct igb_adapter *);
+extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
 extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_power_up_link(struct igb_adapter *);
index 26bf6a13d1c1a6e40beb4f1b8ec9c207fb501e1a..a70e16bcfa7e3fb58ac34205c3540cb306957cd7 100644 (file)
@@ -90,8 +90,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
 
 #define IGB_NETDEV_STAT(_net_stat) { \
        .stat_string = __stringify(_net_stat), \
-       .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
-       .stat_offset = offsetof(struct net_device_stats, _net_stat) \
+       .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+       .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
 }
 static const struct igb_stats igb_gstrings_net_stats[] = {
        IGB_NETDEV_STAT(rx_errors),
@@ -111,8 +111,9 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
        (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
 #define IGB_RX_QUEUE_STATS_LEN \
        (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
-#define IGB_TX_QUEUE_STATS_LEN \
-       (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
+
+#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+
 #define IGB_QUEUE_STATS_LEN \
        ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
          IGB_RX_QUEUE_STATS_LEN) + \
@@ -2070,12 +2071,14 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
                                  struct ethtool_stats *stats, u64 *data)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
-       struct net_device_stats *net_stats = &netdev->stats;
-       u64 *queue_stat;
-       int i, j, k;
+       struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+       unsigned int start;
+       struct igb_ring *ring;
+       int i, j;
        char *p;
 
-       igb_update_stats(adapter);
+       spin_lock(&adapter->stats64_lock);
+       igb_update_stats(adapter, net_stats);
 
        for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
                p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
@@ -2088,15 +2091,36 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
        for (j = 0; j < adapter->num_tx_queues; j++) {
-               queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
-               for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
-                       data[i] = queue_stat[k];
+               u64     restart2;
+
+               ring = adapter->tx_ring[j];
+               do {
+                       start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+                       data[i]   = ring->tx_stats.packets;
+                       data[i+1] = ring->tx_stats.bytes;
+                       data[i+2] = ring->tx_stats.restart_queue;
+               } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+               do {
+                       start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+                       restart2  = ring->tx_stats.restart_queue2;
+               } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+               data[i+2] += restart2;
+
+               i += IGB_TX_QUEUE_STATS_LEN;
        }
        for (j = 0; j < adapter->num_rx_queues; j++) {
-               queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
-               for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
-                       data[i] = queue_stat[k];
+               ring = adapter->rx_ring[j];
+               do {
+                       start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+                       data[i]   = ring->rx_stats.packets;
+                       data[i+1] = ring->rx_stats.bytes;
+                       data[i+2] = ring->rx_stats.drops;
+                       data[i+3] = ring->rx_stats.csum_err;
+                       data[i+4] = ring->rx_stats.alloc_failed;
+               } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+               i += IGB_RX_QUEUE_STATS_LEN;
        }
+       spin_unlock(&adapter->stats64_lock);
 }
 
 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
index 5b04eff2fd23458eee1ef7b7098194db754ab573..b8dccc0ac089780b1fc8c6b445f9f2acde132221 100644 (file)
@@ -96,7 +96,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
-void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
 static int igb_sw_init(struct igb_adapter *);
@@ -113,7 +112,8 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
-static struct net_device_stats *igb_get_stats(struct net_device *);
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+                                                struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -1536,7 +1536,9 @@ void igb_down(struct igb_adapter *adapter)
        netif_carrier_off(netdev);
 
        /* record the stats before reset*/
-       igb_update_stats(adapter);
+       spin_lock(&adapter->stats64_lock);
+       igb_update_stats(adapter, &adapter->stats64);
+       spin_unlock(&adapter->stats64_lock);
 
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
@@ -1689,7 +1691,7 @@ static const struct net_device_ops igb_netdev_ops = {
        .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
        .ndo_start_xmit         = igb_xmit_frame_adv,
-       .ndo_get_stats          = igb_get_stats,
+       .ndo_get_stats64        = igb_get_stats64,
        .ndo_set_rx_mode        = igb_set_rx_mode,
        .ndo_set_multicast_list = igb_set_rx_mode,
        .ndo_set_mac_address    = igb_set_mac,
@@ -2276,6 +2278,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+       spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
        if (hw->mac.type == e1000_82576)
                adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
@@ -3483,7 +3486,9 @@ static void igb_watchdog_task(struct work_struct *work)
                }
        }
 
-       igb_update_stats(adapter);
+       spin_lock(&adapter->stats64_lock);
+       igb_update_stats(adapter, &adapter->stats64);
+       spin_unlock(&adapter->stats64_lock);
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -3550,6 +3555,8 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
        int new_val = q_vector->itr_val;
        int avg_wire_size = 0;
        struct igb_adapter *adapter = q_vector->adapter;
+       struct igb_ring *ring;
+       unsigned int packets;
 
        /* For non-gigabit speeds, just fix the interrupt rate at 4000
         * ints/sec - ITR timer value of 120 ticks.
@@ -3559,16 +3566,21 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
                goto set_itr_val;
        }
 
-       if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
-               struct igb_ring *ring = q_vector->rx_ring;
-               avg_wire_size = ring->total_bytes / ring->total_packets;
+       ring = q_vector->rx_ring;
+       if (ring) {
+               packets = ACCESS_ONCE(ring->total_packets);
+
+               if (packets)
+                       avg_wire_size = ring->total_bytes / packets;
        }
 
-       if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
-               struct igb_ring *ring = q_vector->tx_ring;
-               avg_wire_size = max_t(u32, avg_wire_size,
-                                     (ring->total_bytes /
-                                      ring->total_packets));
+       ring = q_vector->tx_ring;
+       if (ring) {
+               packets = ACCESS_ONCE(ring->total_packets);
+
+               if (packets)
+                       avg_wire_size = max_t(u32, avg_wire_size,
+                                             ring->total_bytes / packets);
        }
 
        /* if avg_wire_size isn't set no work was done */
@@ -4077,7 +4089,11 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 
        /* A reprieve! */
        netif_wake_subqueue(netdev, tx_ring->queue_index);
-       tx_ring->tx_stats.restart_queue++;
+
+       u64_stats_update_begin(&tx_ring->tx_syncp2);
+       tx_ring->tx_stats.restart_queue2++;
+       u64_stats_update_end(&tx_ring->tx_syncp2);
+
        return 0;
 }
 
@@ -4214,16 +4230,22 @@ static void igb_reset_task(struct work_struct *work)
 }
 
 /**
- * igb_get_stats - Get System Network Statistics
+ * igb_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *igb_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+                                                struct rtnl_link_stats64 *stats)
 {
-       /* only return the current stats */
-       return &netdev->stats;
+       struct igb_adapter *adapter = netdev_priv(netdev);
+
+       spin_lock(&adapter->stats64_lock);
+       igb_update_stats(adapter, &adapter->stats64);
+       memcpy(stats, &adapter->stats64, sizeof(*stats));
+       spin_unlock(&adapter->stats64_lock);
+
+       return stats;
 }
 
 /**
@@ -4305,15 +4327,17 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
  * @adapter: board private structure
  **/
 
-void igb_update_stats(struct igb_adapter *adapter)
+void igb_update_stats(struct igb_adapter *adapter,
+                     struct rtnl_link_stats64 *net_stats)
 {
-       struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
        u32 reg, mpc;
        u16 phy_tmp;
        int i;
        u64 bytes, packets;
+       unsigned int start;
+       u64 _bytes, _packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -4331,10 +4355,17 @@ void igb_update_stats(struct igb_adapter *adapter)
        for (i = 0; i < adapter->num_rx_queues; i++) {
                u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
                struct igb_ring *ring = adapter->rx_ring[i];
+
                ring->rx_stats.drops += rqdpc_tmp;
                net_stats->rx_fifo_errors += rqdpc_tmp;
-               bytes += ring->rx_stats.bytes;
-               packets += ring->rx_stats.packets;
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+                       _bytes = ring->rx_stats.bytes;
+                       _packets = ring->rx_stats.packets;
+               } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+               bytes += _bytes;
+               packets += _packets;
        }
 
        net_stats->rx_bytes = bytes;
@@ -4344,8 +4375,13 @@ void igb_update_stats(struct igb_adapter *adapter)
        packets = 0;
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = adapter->tx_ring[i];
-               bytes += ring->tx_stats.bytes;
-               packets += ring->tx_stats.packets;
+               do {
+                       start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+                       _bytes = ring->tx_stats.bytes;
+                       _packets = ring->tx_stats.packets;
+               } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+               bytes += _bytes;
+               packets += _packets;
        }
        net_stats->tx_bytes = bytes;
        net_stats->tx_packets = packets;
@@ -5397,7 +5433,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !(test_bit(__IGB_DOWN, &adapter->state))) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+                       u64_stats_update_begin(&tx_ring->tx_syncp);
                        tx_ring->tx_stats.restart_queue++;
+                       u64_stats_update_end(&tx_ring->tx_syncp);
                }
        }
 
@@ -5437,8 +5476,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
        }
        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
+       u64_stats_update_begin(&tx_ring->tx_syncp);
        tx_ring->tx_stats.bytes += total_bytes;
        tx_ring->tx_stats.packets += total_packets;
+       u64_stats_update_end(&tx_ring->tx_syncp);
        return count < tx_ring->count;
 }
 
@@ -5480,9 +5521,11 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                 * packets, (aka let the stack check the crc32c)
                 */
                if ((skb->len == 60) &&
-                   (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
+                   (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+                       u64_stats_update_begin(&ring->rx_syncp);
                        ring->rx_stats.csum_err++;
-
+                       u64_stats_update_end(&ring->rx_syncp);
+               }
                /* let the stack verify checksum errors */
                return;
        }
@@ -5669,8 +5712,10 @@ next_desc:
 
        rx_ring->total_packets += total_packets;
        rx_ring->total_bytes += total_bytes;
+       u64_stats_update_begin(&rx_ring->rx_syncp);
        rx_ring->rx_stats.packets += total_packets;
        rx_ring->rx_stats.bytes += total_bytes;
+       u64_stats_update_end(&rx_ring->rx_syncp);
        return cleaned;
 }
 
@@ -5698,8 +5743,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
                if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
                                buffer_info->page = netdev_alloc_page(netdev);
-                               if (!buffer_info->page) {
+                               if (unlikely(!buffer_info->page)) {
+                                       u64_stats_update_begin(&rx_ring->rx_syncp);
                                        rx_ring->rx_stats.alloc_failed++;
+                                       u64_stats_update_end(&rx_ring->rx_syncp);
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
@@ -5714,7 +5761,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
                        if (dma_mapping_error(rx_ring->dev,
                                              buffer_info->page_dma)) {
                                buffer_info->page_dma = 0;
+                               u64_stats_update_begin(&rx_ring->rx_syncp);
                                rx_ring->rx_stats.alloc_failed++;
+                               u64_stats_update_end(&rx_ring->rx_syncp);
                                goto no_buffers;
                        }
                }
@@ -5722,8 +5771,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
                skb = buffer_info->skb;
                if (!skb) {
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
-                       if (!skb) {
+                       if (unlikely(!skb)) {
+                               u64_stats_update_begin(&rx_ring->rx_syncp);
                                rx_ring->rx_stats.alloc_failed++;
+                               u64_stats_update_end(&rx_ring->rx_syncp);
                                goto no_buffers;
                        }
 
@@ -5737,7 +5788,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
                        if (dma_mapping_error(rx_ring->dev,
                                              buffer_info->dma)) {
                                buffer_info->dma = 0;
+                               u64_stats_update_begin(&rx_ring->rx_syncp);
                                rx_ring->rx_stats.alloc_failed++;
+                               u64_stats_update_end(&rx_ring->rx_syncp);
                                goto no_buffers;
                        }
                }
index 265501348f33776c22c825e78d0978703e77c093..6693323a6cf5cbd21f3caca60176bece32d22ddb 100644 (file)
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-#include <linux/pm_qos_params.h>
 
 #include "igbvf.h"
 
 #define DRV_VERSION "1.0.0-k0"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
-static struct pm_qos_request_list igbvf_driver_pm_qos_req;
 static const char igbvf_driver_string[] =
                                "Intel(R) Virtual Function Network Driver";
 static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -2904,8 +2902,6 @@ static int __init igbvf_init_module(void)
        printk(KERN_INFO "%s\n", igbvf_copyright);
 
        ret = pci_register_driver(&igbvf_driver);
-       pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
 
        return ret;
 }
@@ -2920,7 +2916,6 @@ module_init(igbvf_init_module);
 static void __exit igbvf_exit_module(void)
 {
        pci_unregister_driver(&igbvf_driver);
-       pm_qos_remove_request(&igbvf_driver_pm_qos_req);
 }
 module_exit(igbvf_exit_module);
 
index ca142c47b2e4c631a90c5b61382c99c49f784dd2..94255f09093d670695de61b00a07b818371844a0 100644 (file)
@@ -678,7 +678,14 @@ static int netconsole_netdev_event(struct notifier_block *this,
                                strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
                                break;
                        case NETDEV_UNREGISTER:
-                               netpoll_cleanup(&nt->np);
+                               /*
+                                * rtnl_lock already held
+                                */
+                               if (nt->np.dev) {
+                                       __netpoll_cleanup(&nt->np);
+                                       dev_put(nt->np.dev);
+                                       nt->np.dev = NULL;
+                               }
                                /* Fall through */
                        case NETDEV_GOING_DOWN:
                        case NETDEV_BONDING_DESLAVE:
index 6dca3574e35507a94ca7e5b1b518a06199e99e03..92f89af0720e666e32be4840f24c5d937d83f849 100644 (file)
 #define        MAX_NUM_CARDS           4
 
 #define MAX_BUFFERS_PER_CMD    32
-#define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + 4)
+#define MAX_TSO_HEADER_DESC    2
+#define MGMT_CMD_DESC_RESV     4
+#define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+                                                       + MGMT_CMD_DESC_RESV)
 #define NX_MAX_TX_TIMEOUTS     2
 
 /*
index 29d7b93d0493be12e4a5def185282371b5cd400d..4b4ac7106786904c803ac649d00fdcf243c62321 100644 (file)
@@ -598,8 +598,14 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
 
        if (nr_desc >= netxen_tx_avail(tx_ring)) {
                netif_tx_stop_queue(tx_ring->txq);
-               __netif_tx_unlock_bh(tx_ring->txq);
-               return -EBUSY;
+               smp_mb();
+               if (netxen_tx_avail(tx_ring) > nr_desc) {
+                       if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+                               netif_tx_wake_queue(tx_ring->txq);
+               } else {
+                       __netif_tx_unlock_bh(tx_ring->txq);
+                       return -EBUSY;
+               }
        }
 
        do {
@@ -1816,14 +1822,14 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
        if (netxen_rom_fast_read(adapter, offset, &board_type))
                return -EIO;
 
-       adapter->ahw.board_type = board_type;
-
        if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
                u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I);
                if ((gpio & 0x8000) == 0)
                        board_type = NETXEN_BRDTYPE_P3_10G_TP;
        }
 
+       adapter->ahw.board_type = board_type;
+
        switch (board_type) {
        case NETXEN_BRDTYPE_P2_SB35_4G:
                adapter->ahw.port_type = NETXEN_NIC_GBE;
index a2d805aa75cd3a9698803d4358cbf1aa412f2330..95fe552aa2795a1a3a38bd5263c1eea8cfff5d13 100644 (file)
@@ -1763,14 +1763,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 
                smp_mb();
 
-               if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-                       __netif_tx_lock(tx_ring->txq, smp_processor_id());
-                       if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) {
+               if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
+                       if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
                                netif_wake_queue(netdev);
-                               adapter->tx_timeo_cnt = 0;
-                       }
-                       __netif_tx_unlock(tx_ring->txq);
-               }
+               adapter->tx_timeo_cnt = 0;
        }
        /*
         * If everything is freed up to consumer then check if the ring is full
index 2c6ceeb592b3d4d7b1fe8cf12fb1de1f265f568e..6f111691aca44ba8d7078c3bd9ae3f65cdbac72e 100644 (file)
@@ -125,11 +125,6 @@ netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
                struct nx_host_tx_ring *tx_ring)
 {
        NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
-
-       if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
-               netif_stop_queue(adapter->netdev);
-               smp_mb();
-       }
 }
 
 static uint32_t crb_cmd_consumer[4] = {
@@ -1209,7 +1204,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
                adapter->max_mc_count = 16;
 
        netdev->netdev_ops         = &netxen_netdev_ops;
-       netdev->watchdog_timeo     = 2*HZ;
+       netdev->watchdog_timeo     = 5*HZ;
 
        netxen_nic_change_mtu(netdev, netdev->mtu);
 
@@ -1254,6 +1249,28 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
        return 0;
 }
 
+#ifdef CONFIG_PCIEAER
+static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct pci_dev *root = pdev->bus->self;
+       u32 aer_pos;
+
+       if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
+               adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
+               return;
+
+       if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT)
+               return;
+
+       aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
+       if (!aer_pos)
+               return;
+
+       pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
+}
+#endif
+
 static int __devinit
 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -1322,6 +1339,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_iounmap;
        }
 
+#ifdef CONFIG_PCIEAER
+       netxen_mask_aer_correctable(adapter);
+#endif
+
        /* Mezz cards have PCI function 0,2,3 enabled */
        switch (adapter->ahw.board_type) {
        case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
@@ -1825,9 +1846,13 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        /* 4 fragments per cmd des */
        no_of_desc = (frag_count + 3) >> 2;
 
-       if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
+       if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
                netif_stop_queue(netdev);
-               return NETDEV_TX_BUSY;
+               smp_mb();
+               if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+                       netif_start_queue(netdev);
+               else
+                       return NETDEV_TX_BUSY;
        }
 
        producer = tx_ring->producer;
index c0437fd8d3f29a38f9dc6ada9b1bd7431262369d..781e368329f91efe26389c17e0b293bb4342fcde 100644 (file)
@@ -7090,24 +7090,20 @@ static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
 static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
                                        struct ethtool_rx_flow_spec *fsp)
 {
+       u32 tmp;
+       u16 prt;
 
-       fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >>
-               TCAM_V4KEY3_SADDR_SHIFT;
-       fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >>
-               TCAM_V4KEY3_DADDR_SHIFT;
-       fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >>
-               TCAM_V4KEY3_SADDR_SHIFT;
-       fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >>
-               TCAM_V4KEY3_DADDR_SHIFT;
-
-       fsp->h_u.tcp_ip4_spec.ip4src =
-               cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src);
-       fsp->m_u.tcp_ip4_spec.ip4src =
-               cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src);
-       fsp->h_u.tcp_ip4_spec.ip4dst =
-               cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst);
-       fsp->m_u.tcp_ip4_spec.ip4dst =
-               cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst);
+       tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
+       fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
+
+       tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
+       fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
+
+       tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
+       fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
+
+       tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
+       fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
 
        fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
                TCAM_V4KEY2_TOS_SHIFT;
@@ -7118,54 +7114,40 @@ static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
        case SCTP_V4_FLOW:
-               fsp->h_u.tcp_ip4_spec.psrc =
-                       ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
-                        TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
-               fsp->h_u.tcp_ip4_spec.pdst =
-                       ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
-                        TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
-               fsp->m_u.tcp_ip4_spec.psrc =
-                       ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
-                        TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
-               fsp->m_u.tcp_ip4_spec.pdst =
-                       ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
-                        TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+               prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+               fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
+
+               prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+               fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
 
-               fsp->h_u.tcp_ip4_spec.psrc =
-                       cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc);
-               fsp->h_u.tcp_ip4_spec.pdst =
-                       cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst);
-               fsp->m_u.tcp_ip4_spec.psrc =
-                       cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc);
-               fsp->m_u.tcp_ip4_spec.pdst =
-                       cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst);
+               prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+               fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
+
+               prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+                        TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+               fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
                break;
        case AH_V4_FLOW:
        case ESP_V4_FLOW:
-               fsp->h_u.ah_ip4_spec.spi =
-                       (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
-                       TCAM_V4KEY2_PORT_SPI_SHIFT;
-               fsp->m_u.ah_ip4_spec.spi =
-                       (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+               tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
                        TCAM_V4KEY2_PORT_SPI_SHIFT;
+               fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
 
-               fsp->h_u.ah_ip4_spec.spi =
-                       cpu_to_be32(fsp->h_u.ah_ip4_spec.spi);
-               fsp->m_u.ah_ip4_spec.spi =
-                       cpu_to_be32(fsp->m_u.ah_ip4_spec.spi);
+               tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT;
+               fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
                break;
        case IP_USER_FLOW:
-               fsp->h_u.usr_ip4_spec.l4_4_bytes =
-                       (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
-                       TCAM_V4KEY2_PORT_SPI_SHIFT;
-               fsp->m_u.usr_ip4_spec.l4_4_bytes =
-                       (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+               tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
                        TCAM_V4KEY2_PORT_SPI_SHIFT;
+               fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 
-               fsp->h_u.usr_ip4_spec.l4_4_bytes =
-                       cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes);
-               fsp->m_u.usr_ip4_spec.l4_4_bytes =
-                       cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes);
+               tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT;
+               fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
 
                fsp->h_u.usr_ip4_spec.proto =
                        (tp->key[2] & TCAM_V4KEY2_PROTO) >>
index 3bbd0aab17e813201eb51ba35f1f4990e80e7887..84134c766f3a86fca82195683be07de25cbf5fa1 100644 (file)
@@ -772,7 +772,7 @@ static int ns83820_setup_rx(struct net_device *ndev)
                phy_intr(ndev);
 
                /* Okay, let it rip */
-               spin_lock_irq(&dev->misc_lock);
+               spin_lock(&dev->misc_lock);
                dev->IMR_cache |= ISR_PHY;
                dev->IMR_cache |= ISR_RXRCMP;
                //dev->IMR_cache |= ISR_RXERR;
index e06c6aea45271c5d8089e378222f55ef58c57ad0..c8cc32c0edc9678d6174693412e977e43a4ebf29 100644 (file)
@@ -113,9 +113,10 @@ static int pch_gbe_set_settings(struct net_device *netdev,
 
        pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
 
-       if (ecmd->speed == -1)
+       if (ecmd->speed == USHRT_MAX) {
                ecmd->speed = SPEED_1000;
                ecmd->duplex = DUPLEX_FULL;
+       }
        ret = mii_ethtool_sset(&adapter->mii, ecmd);
        if (ret) {
                pr_err("Error: mii_ethtool_sset\n");
index e44644f169fd2b0e7e8198def7402e436bac8850..cf4b49d6c6da2d96941de995a384e3a8a2e932cd 100644 (file)
@@ -2394,7 +2394,7 @@ err_disable_device:
        return ret;
 }
 
-static const struct pci_device_id pch_gbe_pcidev_id[] = {
+static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
        {.vendor = PCI_VENDOR_ID_INTEL,
         .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
         .subvendor = PCI_ANY_ID,
index 2568aa6650243272ddaeda40013d8d03d270bf94..25e93a53fca0954e41728e9c55e31305ee62cc53 100644 (file)
@@ -78,7 +78,25 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
 
 };
 
+static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+       "rx unicast frames",
+       "rx multicast frames",
+       "rx broadcast frames",
+       "rx dropped frames",
+       "rx errors",
+       "rx local frames",
+       "rx numbytes",
+       "tx unicast frames",
+       "tx multicast frames",
+       "tx broadcast frames",
+       "tx dropped frames",
+       "tx errors",
+       "tx local frames",
+       "tx numbytes",
+};
+
 #define QLCNIC_STATS_LEN       ARRAY_SIZE(qlcnic_gstrings_stats)
+#define QLCNIC_DEVICE_STATS_LEN        ARRAY_SIZE(qlcnic_device_gstrings_stats)
 
 static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register_Test_on_offline",
@@ -625,10 +643,13 @@ static int qlcnic_reg_test(struct net_device *dev)
 
 static int qlcnic_get_sset_count(struct net_device *dev, int sset)
 {
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
        switch (sset) {
        case ETH_SS_TEST:
                return QLCNIC_TEST_LEN;
        case ETH_SS_STATS:
+               if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+                       return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
                return QLCNIC_STATS_LEN;
        default:
                return -EOPNOTSUPP;
@@ -795,7 +816,8 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
 static void
 qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
 {
-       int index;
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       int index, i;
 
        switch (stringset) {
        case ETH_SS_TEST:
@@ -808,16 +830,43 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
                               qlcnic_gstrings_stats[index].stat_string,
                               ETH_GSTRING_LEN);
                }
-               break;
+               if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+                       return;
+               for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
+                       memcpy(data + index * ETH_GSTRING_LEN,
+                              qlcnic_device_gstrings_stats[i],
+                              ETH_GSTRING_LEN);
+               }
        }
 }
 
+#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \
+       (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 0 : VAL1)
+
+static void
+qlcnic_fill_device_stats(int *index, u64 *data,
+               struct __qlcnic_esw_statistics *stats)
+{
+       int ind = *index;
+
+       data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames);
+       data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames);
+       data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames);
+       data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames);
+       data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors);
+       data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames);
+       data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes);
+
+       *index = ind;
+}
+
 static void
 qlcnic_get_ethtool_stats(struct net_device *dev,
                             struct ethtool_stats *stats, u64 * data)
 {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
-       int index;
+       struct qlcnic_esw_statistics port_stats;
+       int index, ret;
 
        for (index = 0; index < QLCNIC_STATS_LEN; index++) {
                char *p =
@@ -827,6 +876,24 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
                    (qlcnic_gstrings_stats[index].sizeof_stat ==
                     sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
        }
+
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+               return;
+
+       memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
+       ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func,
+                       QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
+       if (ret)
+               return;
+
+       qlcnic_fill_device_stats(&index, data, &port_stats.rx);
+
+       ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func,
+                       QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
+       if (ret)
+               return;
+
+       qlcnic_fill_device_stats(&index, data, &port_stats.tx);
 }
 
 static int qlcnic_set_tx_csum(struct net_device *dev, u32 data)
index bc669a40ae963a8498e7e52e6856c199cf7b9a73..1760533852a46a0681bc6ed9de7cedec9637ae02 100644 (file)
@@ -187,12 +187,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 
-/*
- * we set our copybreak very high so that we don't have
- * to allocate 16k frames all the time (see note in
- * rtl8169_open()
- */
-static int rx_copybreak = 16383;
+static int rx_buf_sz = 16383;
 static int use_dac;
 static struct {
        u32 msg_enable;
@@ -484,10 +479,8 @@ struct rtl8169_private {
        struct RxDesc *RxDescArray;     /* 256-aligned Rx descriptor ring */
        dma_addr_t TxPhyAddr;
        dma_addr_t RxPhyAddr;
-       struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
+       void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
        struct ring_info tx_skb[NUM_TX_DESC];   /* Tx data buffers */
-       unsigned align;
-       unsigned rx_buf_sz;
        struct timer_list timer;
        u16 cp_cmd;
        u16 intr_event;
@@ -515,8 +508,6 @@ struct rtl8169_private {
 
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
-module_param(rx_copybreak, int, 0);
-MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 module_param(use_dac, int, 0);
 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
@@ -3196,7 +3187,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->features |= NETIF_F_GRO;
 
        tp->intr_mask = 0xffff;
-       tp->align = cfg->align;
        tp->hw_start = cfg->hw_start;
        tp->intr_event = cfg->intr_event;
        tp->napi_event = cfg->napi_event;
@@ -3266,18 +3256,6 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
 }
 
-static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
-                                 unsigned int mtu)
-{
-       unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
-
-       if (max_frame != 16383)
-               printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
-                       "NIC may lead to frame reception errors!\n");
-
-       tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
-}
-
 static int rtl8169_open(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
@@ -3286,18 +3264,6 @@ static int rtl8169_open(struct net_device *dev)
 
        pm_runtime_get_sync(&pdev->dev);
 
-       /*
-        * Note that we use a magic value here, its wierd I know
-        * its done because, some subset of rtl8169 hardware suffers from
-        * a problem in which frames received that are longer than
-        * the size set in RxMaxSize register return garbage sizes
-        * when received.  To avoid this we need to turn off filtering,
-        * which is done by setting a value of 16383 in the RxMaxSize register
-        * and allocating 16k frames to handle the largest possible rx value
-        * thats what the magic math below does.
-        */
-       rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
-
        /*
         * Rx and Tx desscriptors needs 256 bytes alignment.
         * dma_alloc_coherent provides more.
@@ -3474,7 +3440,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
 
        RTL_W8(EarlyTxThres, EarlyTxThld);
 
-       rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
+       rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 
        if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
            (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -3735,7 +3701,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
 
        RTL_W8(EarlyTxThres, EarlyTxThld);
 
-       rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
+       rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 
        tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
 
@@ -3915,7 +3881,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
 
        RTL_W8(EarlyTxThres, EarlyTxThld);
 
-       rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
+       rtl_set_rx_max_size(ioaddr, rx_buf_sz);
 
        tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
 
@@ -3956,8 +3922,6 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
 
        rtl8169_down(dev);
 
-       rtl8169_set_rxbufsize(tp, dev->mtu);
-
        ret = rtl8169_init_ring(dev);
        if (ret < 0)
                goto out;
@@ -3978,15 +3942,15 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
        desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
 }
 
-static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
-                               struct sk_buff **sk_buff, struct RxDesc *desc)
+static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
+                                    void **data_buff, struct RxDesc *desc)
 {
        struct pci_dev *pdev = tp->pci_dev;
 
-       dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
+       dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
                         PCI_DMA_FROMDEVICE);
-       dev_kfree_skb(*sk_buff);
-       *sk_buff = NULL;
+       kfree(*data_buff);
+       *data_buff = NULL;
        rtl8169_make_unusable_by_asic(desc);
 }
 
@@ -4005,33 +3969,34 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
        rtl8169_mark_to_asic(desc, rx_buf_sz);
 }
 
-static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
+static inline void *rtl8169_align(void *data)
+{
+       return (void *)ALIGN((long)data, 16);
+}
+
+static struct sk_buff *rtl8169_alloc_rx_data(struct pci_dev *pdev,
                                            struct net_device *dev,
-                                           struct RxDesc *desc, int rx_buf_sz,
-                                           unsigned int align, gfp_t gfp)
+                                           struct RxDesc *desc)
 {
-       struct sk_buff *skb;
+       void *data;
        dma_addr_t mapping;
-       unsigned int pad;
+       int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
 
-       pad = align ? align : NET_IP_ALIGN;
+       data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+       if (!data)
+               return NULL;
 
-       skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
-       if (!skb)
-               goto err_out;
-
-       skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
-
-       mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
+       if (rtl8169_align(data) != data) {
+               kfree(data);
+               data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
+               if (!data)
+                       return NULL;
+       }
+       mapping = dma_map_single(&pdev->dev, rtl8169_align(data), rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
 
        rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
-out:
-       return skb;
-
-err_out:
-       rtl8169_make_unusable_by_asic(desc);
-       goto out;
+       return data;
 }
 
 static void rtl8169_rx_clear(struct rtl8169_private *tp)
@@ -4039,8 +4004,8 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
        unsigned int i;
 
        for (i = 0; i < NUM_RX_DESC; i++) {
-               if (tp->Rx_skbuff[i]) {
-                       rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
+               if (tp->Rx_databuff[i]) {
+                       rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
                                            tp->RxDescArray + i);
                }
        }
@@ -4052,21 +4017,21 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
        u32 cur;
 
        for (cur = start; end - cur != 0; cur++) {
-               struct sk_buff *skb;
+               void *data;
                unsigned int i = cur % NUM_RX_DESC;
 
                WARN_ON((s32)(end - cur) < 0);
 
-               if (tp->Rx_skbuff[i])
+               if (tp->Rx_databuff[i])
                        continue;
 
-               skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
-                                          tp->RxDescArray + i,
-                                          tp->rx_buf_sz, tp->align, gfp);
-               if (!skb)
+               data = rtl8169_alloc_rx_data(tp->pci_dev, dev,
+                                            tp->RxDescArray + i);
+               if (!data) {
+                       rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
                        break;
-
-               tp->Rx_skbuff[i] = skb;
+               }
+               tp->Rx_databuff[i] = data;
        }
        return cur - start;
 }
@@ -4088,7 +4053,7 @@ static int rtl8169_init_ring(struct net_device *dev)
        rtl8169_init_ring_indexes(tp);
 
        memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
-       memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
+       memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
 
        if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
                goto err_out;
@@ -4473,27 +4438,23 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
                skb_checksum_none_assert(skb);
 }
 
-static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
-                                      struct rtl8169_private *tp, int pkt_size,
-                                      dma_addr_t addr)
+static struct sk_buff *rtl8169_try_rx_copy(void *data,
+                                          struct rtl8169_private *tp,
+                                          int pkt_size,
+                                          dma_addr_t addr)
 {
        struct sk_buff *skb;
-       bool done = false;
-
-       if (pkt_size >= rx_copybreak)
-               goto out;
-
-       skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
-       if (!skb)
-               goto out;
 
+       data = rtl8169_align(data);
        dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
                                PCI_DMA_FROMDEVICE);
-       skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
-       *sk_buff = skb;
-       done = true;
-out:
-       return done;
+       prefetch(data);
+       skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+       if (skb)
+               memcpy(skb->data, data, pkt_size);
+       dma_sync_single_for_device(&tp->pci_dev->dev, addr, pkt_size,
+                                  PCI_DMA_FROMDEVICE);
+       return skb;
 }
 
 /*
@@ -4508,7 +4469,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                                void __iomem *ioaddr, u32 budget)
 {
        unsigned int cur_rx, rx_left;
-       unsigned int delta, count;
+       unsigned int count;
        int polling = (budget != ~(u32)0) ? 1 : 0;
 
        cur_rx = tp->cur_rx;
@@ -4537,12 +4498,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                                rtl8169_schedule_work(dev, rtl8169_reset_task);
                                dev->stats.rx_fifo_errors++;
                        }
-                       rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
+                       rtl8169_mark_to_asic(desc, rx_buf_sz);
                } else {
-                       struct sk_buff *skb = tp->Rx_skbuff[entry];
+                       struct sk_buff *skb;
                        dma_addr_t addr = le64_to_cpu(desc->addr);
                        int pkt_size = (status & 0x00001FFF) - 4;
-                       struct pci_dev *pdev = tp->pci_dev;
 
                        /*
                         * The driver does not support incoming fragmented
@@ -4552,18 +4512,16 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                        if (unlikely(rtl8169_fragmented_frame(status))) {
                                dev->stats.rx_dropped++;
                                dev->stats.rx_length_errors++;
-                               rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
+                               rtl8169_mark_to_asic(desc, rx_buf_sz);
                                continue;
                        }
 
-                       if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
-                               dma_sync_single_for_device(&pdev->dev, addr,
-                                       pkt_size, PCI_DMA_FROMDEVICE);
-                               rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
-                       } else {
-                               dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
-                                                PCI_DMA_FROMDEVICE);
-                               tp->Rx_skbuff[entry] = NULL;
+                       skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
+                                                 tp, pkt_size, addr);
+                       rtl8169_mark_to_asic(desc, rx_buf_sz);
+                       if (!skb) {
+                               dev->stats.rx_dropped++;
+                               continue;
                        }
 
                        rtl8169_rx_csum(skb, status);
@@ -4592,20 +4550,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
        count = cur_rx - tp->cur_rx;
        tp->cur_rx = cur_rx;
 
-       delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
-       if (!delta && count)
-               netif_info(tp, intr, dev, "no Rx buffer allocated\n");
-       tp->dirty_rx += delta;
-
-       /*
-        * FIXME: until there is periodic timer to try and refill the ring,
-        * a temporary shortage may definitely kill the Rx process.
-        * - disable the asic to try and avoid an overflow and kick it again
-        *   after refill ?
-        * - how do others driver handle this condition (Uh oh...).
-        */
-       if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
-               netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
+       tp->dirty_rx += count;
 
        return count;
 }
index dec7ce40c27a05e707cb141c9f5419b8c231db3f..375ea193e139a4a1bd23ecf68937007402682745 100644 (file)
@@ -235,9 +235,9 @@ struct mii_regs {
 };
 
 struct mac_device_info {
-       struct stmmac_ops       *mac;
-       struct stmmac_desc_ops  *desc;
-       struct stmmac_dma_ops   *dma;
+       const struct stmmac_ops         *mac;
+       const struct stmmac_desc_ops    *desc;
+       const struct stmmac_dma_ops     *dma;
        struct mii_regs mii;    /* MII register Addresses */
        struct mac_link link;
 };
index 97956cbf1cb4de2a374e0626f20df4977e680fd1..7c6d857a9cc7f0c357bb7b1d0cb9d417ecbc25a5 100644 (file)
@@ -118,4 +118,4 @@ enum ttc_control {
 #define DMA_MISSED_FRAME_OVE_M 0x00010000      /* Missed Frame Overflow */
 #define DMA_MISSED_FRAME_M_CNTR        0x0000ffff      /* Missed Frame Couinter */
 
-extern struct stmmac_dma_ops dwmac100_dma_ops;
+extern const struct stmmac_dma_ops dwmac100_dma_ops;
index 81ee4fd04386d0163585fc731461af254911fee2..cfcef0ea0fa5db049120c5a2f097d031b29330a8 100644 (file)
@@ -205,4 +205,4 @@ enum rtc_control {
 #define GMAC_MMC_TX_INTR   0x108
 #define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
 
-extern struct stmmac_dma_ops dwmac1000_dma_ops;
+extern const struct stmmac_dma_ops dwmac1000_dma_ops;
index 65667b692024f26b78cc0d01819a5aa96fb8c323..6ae4c3f4c63c712031b4ca67549a8d8f631ed9fd 100644 (file)
@@ -212,7 +212,7 @@ static void dwmac1000_irq_status(void __iomem *ioaddr)
        }
 }
 
-struct stmmac_ops dwmac1000_ops = {
+static const struct stmmac_ops dwmac1000_ops = {
        .core_init = dwmac1000_core_init,
        .rx_coe = dwmac1000_rx_coe_supported,
        .dump_regs = dwmac1000_dump_regs,
index ce6163e39cd540416c1c286f5d9aae77cd397adf..2c47712d45d05cbe7ffddc792f62cf362e6c310c 100644 (file)
@@ -138,7 +138,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
        }
 }
 
-struct stmmac_dma_ops dwmac1000_dma_ops = {
+const struct stmmac_dma_ops dwmac1000_dma_ops = {
        .init = dwmac1000_dma_init,
        .dump_regs = dwmac1000_dump_dma_regs,
        .dma_mode = dwmac1000_dma_operation_mode,
index 94eeccf3a8a0cb0eb33f3c9b7878d9499ddba72b..c724fc36a24fd696557c32afc87f0cdc6835ba0c 100644 (file)
@@ -168,7 +168,7 @@ static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
        return;
 }
 
-struct stmmac_ops dwmac100_ops = {
+static const struct stmmac_ops dwmac100_ops = {
        .core_init = dwmac100_core_init,
        .rx_coe = dwmac100_rx_coe_supported,
        .dump_regs = dwmac100_dump_mac_regs,
index 96aac93b789b9bc690234cb0c259cedd7aa846d2..e3e224b7d9e23c6903a0f970ae4c7cd0c8491192 100644 (file)
@@ -126,7 +126,7 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
        }
 }
 
-struct stmmac_dma_ops dwmac100_dma_ops = {
+const struct stmmac_dma_ops dwmac100_dma_ops = {
        .init = dwmac100_dma_init,
        .dump_regs = dwmac100_dump_dma_regs,
        .dma_mode = dwmac100_dma_operation_mode,
index 5d1471d8f8f69d6b355d926a8672f8ebc0e59000..e5dfb6a30182a114e198d4955ac08ee303dd7f41 100644 (file)
@@ -318,7 +318,7 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p)
        return p->des01.erx.frame_length;
 }
 
-struct stmmac_desc_ops enh_desc_ops = {
+const struct stmmac_desc_ops enh_desc_ops = {
        .tx_status = enh_desc_get_tx_status,
        .rx_status = enh_desc_get_rx_status,
        .get_tx_len = enh_desc_get_tx_len,
index 0dce90cb81247b68418d40a23a17a50a53fbac36..cd0cc76f7a1c8d3fbc3daf615a0c438a2880c07f 100644 (file)
@@ -202,7 +202,7 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p)
        return p->des01.rx.frame_length;
 }
 
-struct stmmac_desc_ops ndesc_ops = {
+const struct stmmac_desc_ops ndesc_ops = {
        .tx_status = ndesc_get_tx_status,
        .rx_status = ndesc_get_rx_status,
        .get_tx_len = ndesc_get_tx_len,
index 92154ff7d70203cc3c6cbc7f710a46ba36831033..79bdc2e1322489d4fc226409b4f3bb50f549bf48 100644 (file)
@@ -121,5 +121,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev)
 extern int stmmac_mdio_unregister(struct net_device *ndev);
 extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
-extern struct stmmac_desc_ops enh_desc_ops;
-extern struct stmmac_desc_ops ndesc_ops;
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
index 25a7e385f8ec61f9a436fd3b393dfaab193638b5..6d65482e789a3639aa310da5d048ea19232022eb 100644 (file)
@@ -89,8 +89,8 @@ static const struct  stmmac_stats stmmac_gstrings_stats[] = {
 };
 #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
 
-void stmmac_ethtool_getdrvinfo(struct net_device *dev,
-                              struct ethtool_drvinfo *info)
+static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+                                     struct ethtool_drvinfo *info)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
@@ -104,7 +104,8 @@ void stmmac_ethtool_getdrvinfo(struct net_device *dev,
        info->n_stats = STMMAC_STATS_LEN;
 }
 
-int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int stmmac_ethtool_getsettings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phy = priv->phydev;
@@ -126,7 +127,8 @@ int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
        return rc;
 }
 
-int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int stmmac_ethtool_setsettings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phy = priv->phydev;
@@ -139,32 +141,32 @@ int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
        return rc;
 }
 
-u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        return priv->msg_enable;
 }
 
-void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        priv->msg_enable = level;
 
 }
 
-int stmmac_check_if_running(struct net_device *dev)
+static int stmmac_check_if_running(struct net_device *dev)
 {
        if (!netif_running(dev))
                return -EBUSY;
        return 0;
 }
 
-int stmmac_ethtool_get_regs_len(struct net_device *dev)
+static int stmmac_ethtool_get_regs_len(struct net_device *dev)
 {
        return REG_SPACE_SIZE;
 }
 
-void stmmac_ethtool_gregs(struct net_device *dev,
+static void stmmac_ethtool_gregs(struct net_device *dev,
                          struct ethtool_regs *regs, void *space)
 {
        int i;
@@ -195,7 +197,7 @@ void stmmac_ethtool_gregs(struct net_device *dev,
        }
 }
 
-int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
+static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
 {
        if (data)
                netdev->features |= NETIF_F_HW_CSUM;
@@ -205,7 +207,7 @@ int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
        return 0;
 }
 
-u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
+static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
@@ -378,10 +380,8 @@ static struct ethtool_ops stmmac_ethtool_ops = {
        .get_wol = stmmac_get_wol,
        .set_wol = stmmac_set_wol,
        .get_sset_count = stmmac_get_sset_count,
-#ifdef NETIF_F_TSO
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
-#endif
 };
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)
index 4283cc52a8c9cf463035103de964bf84e80d6094..3ed2a67bd6d36c5da4608570bc8d494faa614f59 100644 (file)
@@ -363,6 +363,19 @@ struct netdev_private {
         dma_addr_t tx_ring_dma;
         dma_addr_t rx_ring_dma;
        struct timer_list timer;                /* Media monitoring timer. */
+       /* ethtool extra stats */
+       struct {
+               u64 tx_multiple_collisions;
+               u64 tx_single_collisions;
+               u64 tx_late_collisions;
+               u64 tx_deferred;
+               u64 tx_deferred_excessive;
+               u64 tx_aborted;
+               u64 tx_bcasts;
+               u64 rx_bcasts;
+               u64 tx_mcasts;
+               u64 rx_mcasts;
+       } xstats;
        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        int msg_enable;
@@ -1486,21 +1499,34 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
-       int i;
        unsigned long flags;
+       u8 late_coll, single_coll, mult_coll;
 
        spin_lock_irqsave(&np->statlock, flags);
        /* The chip only need report frame silently dropped. */
        dev->stats.rx_missed_errors     += ioread8(ioaddr + RxMissed);
        dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
        dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
-       dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
-       dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
-       dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
        dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
-       ioread8(ioaddr + StatsTxDefer);
-       for (i = StatsTxDefer; i <= StatsMcastRx; i++)
-               ioread8(ioaddr + i);
+
+       mult_coll = ioread8(ioaddr + StatsMultiColl);
+       np->xstats.tx_multiple_collisions += mult_coll;
+       single_coll = ioread8(ioaddr + StatsOneColl);
+       np->xstats.tx_single_collisions += single_coll;
+       late_coll = ioread8(ioaddr + StatsLateColl);
+       np->xstats.tx_late_collisions += late_coll;
+       dev->stats.collisions += mult_coll
+               + single_coll
+               + late_coll;
+
+       np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
+       np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
+       np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
+       np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
+       np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
+       np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
+       np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
+
        dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
        dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
        dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
@@ -1566,6 +1592,21 @@ static int __set_mac_addr(struct net_device *dev)
        return 0;
 }
 
+static const struct {
+       const char name[ETH_GSTRING_LEN];
+} sundance_stats[] = {
+       { "tx_multiple_collisions" },
+       { "tx_single_collisions" },
+       { "tx_late_collisions" },
+       { "tx_deferred" },
+       { "tx_deferred_excessive" },
+       { "tx_aborted" },
+       { "tx_bcasts" },
+       { "rx_bcasts" },
+       { "tx_mcasts" },
+       { "rx_mcasts" },
+};
+
 static int check_if_running(struct net_device *dev)
 {
        if (!netif_running(dev))
@@ -1624,6 +1665,42 @@ static void set_msglevel(struct net_device *dev, u32 val)
        np->msg_enable = val;
 }
 
+static void get_strings(struct net_device *dev, u32 stringset,
+               u8 *data)
+{
+       if (stringset == ETH_SS_STATS)
+               memcpy(data, sundance_stats, sizeof(sundance_stats));
+}
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(sundance_stats);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void get_ethtool_stats(struct net_device *dev,
+               struct ethtool_stats *stats, u64 *data)
+{
+       struct netdev_private *np = netdev_priv(dev);
+       int i = 0;
+
+       get_stats(dev);
+       data[i++] = np->xstats.tx_multiple_collisions;
+       data[i++] = np->xstats.tx_single_collisions;
+       data[i++] = np->xstats.tx_late_collisions;
+       data[i++] = np->xstats.tx_deferred;
+       data[i++] = np->xstats.tx_deferred_excessive;
+       data[i++] = np->xstats.tx_aborted;
+       data[i++] = np->xstats.tx_bcasts;
+       data[i++] = np->xstats.rx_bcasts;
+       data[i++] = np->xstats.tx_mcasts;
+       data[i++] = np->xstats.rx_mcasts;
+}
+
 static const struct ethtool_ops ethtool_ops = {
        .begin = check_if_running,
        .get_drvinfo = get_drvinfo,
@@ -1633,6 +1710,9 @@ static const struct ethtool_ops ethtool_ops = {
        .get_link = get_link,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
+       .get_strings = get_strings,
+       .get_sset_count = get_sset_count,
+       .get_ethtool_stats = get_ethtool_stats,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index 943c28325444f95003260826d6cbf285b27afcd4..5b4c510dbcedcd062fdc729f48ec78d5a8ba75f0 100644 (file)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    114
+#define TG3_MIN_NUM                    115
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "September 30, 2010"
+#define DRV_MODULE_RELDATE     "October 14, 2010"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -1162,6 +1162,52 @@ static void tg3_mdio_fini(struct tg3 *tp)
        }
 }
 
+static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
+{
+       int err;
+
+       err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+       if (err)
+               goto done;
+
+       err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+       if (err)
+               goto done;
+
+       err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+                          MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+       if (err)
+               goto done;
+
+       err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+       return err;
+}
+
+static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
+{
+       int err;
+
+       err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+       if (err)
+               goto done;
+
+       err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+       if (err)
+               goto done;
+
+       err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+                          MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+       if (err)
+               goto done;
+
+       err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+       return err;
+}
+
 /* tp->lock is held. */
 static inline void tg3_generate_fw_event(struct tg3 *tp)
 {
@@ -1538,6 +1584,17 @@ static void tg3_phy_fini(struct tg3 *tp)
        }
 }
 
+static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
+{
+       int err;
+
+       err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+       if (!err)
+               err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
+
+       return err;
+}
+
 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 {
        int err;
@@ -1701,6 +1758,42 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
 }
 
+static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+{
+       u32 val;
+
+       if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+               return;
+
+       tp->setlpicnt = 0;
+
+       if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+           current_link_up == 1 &&
+           (tp->link_config.active_speed == SPEED_1000 ||
+            (tp->link_config.active_speed == SPEED_100 &&
+             tp->link_config.active_duplex == DUPLEX_FULL))) {
+               u32 eeectl;
+
+               if (tp->link_config.active_speed == SPEED_1000)
+                       eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
+               else
+                       eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
+
+               tw32(TG3_CPMU_EEE_CTRL, eeectl);
+
+               tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val);
+
+               if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
+                   val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+                       tp->setlpicnt = 2;
+       }
+
+       if (!tp->setlpicnt) {
+               val = tr32(TG3_CPMU_EEE_MODE);
+               tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+       }
+}
+
 static int tg3_wait_macro_done(struct tg3 *tp)
 {
        int limit = 100;
@@ -2875,6 +2968,44 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
                tg3_writephy(tp, MII_TG3_CTRL, new_adv);
        }
 
+       if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
+               u32 val = 0;
+
+               tw32(TG3_CPMU_EEE_MODE,
+                    tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+
+               /* Enable SM_DSP clock and tx 6dB coding. */
+               val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+                     MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+                     MII_TG3_AUXCTL_ACTL_TX_6DB;
+               tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+
+               if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+                   !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+                       tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
+                                        val | MII_TG3_DSP_CH34TP2_HIBW01);
+
+               if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+                       /* Advertise 100-BaseTX EEE ability */
+                       if (tp->link_config.advertising &
+                           (ADVERTISED_100baseT_Half |
+                            ADVERTISED_100baseT_Full))
+                               val |= TG3_CL45_D7_EEEADV_CAP_100TX;
+                       /* Advertise 1000-BaseT EEE ability */
+                       if (tp->link_config.advertising &
+                           (ADVERTISED_1000baseT_Half |
+                            ADVERTISED_1000baseT_Full))
+                               val |= TG3_CL45_D7_EEEADV_CAP_1000T;
+               }
+               tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val);
+
+               /* Turn off SM_DSP clock. */
+               val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+                     MII_TG3_AUXCTL_ACTL_TX_6DB;
+               tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+       }
+
        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;
@@ -3236,6 +3367,8 @@ relink:
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);
 
+       tg3_phy_eee_adjust(tp, current_link_up);
+
        if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
@@ -4440,12 +4573,11 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
                            u32 opaque_key, u32 dest_idx_unmasked)
 {
        struct tg3_rx_buffer_desc *desc;
-       struct ring_info *map, *src_map;
+       struct ring_info *map;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int skb_size, dest_idx;
 
-       src_map = NULL;
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
@@ -6087,7 +6219,8 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
                tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);
 
-       if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+       if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+           !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
                        tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
@@ -6114,7 +6247,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
-               if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
+               if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));
                goto done;
@@ -6157,7 +6290,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
                }
        }
 
-       if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
+       if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
+           (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                goto done;
 
        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
@@ -6229,7 +6363,8 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
        if (!tpr->rx_std)
                goto err_out;
 
-       if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+       if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+           !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
                                              GFP_KERNEL);
                if (!tpr->rx_jmb_buffers)
@@ -7536,6 +7671,9 @@ static void tg3_rings_reset(struct tg3 *tp)
        /* Disable all transmit rings but the first. */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+               limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
@@ -7741,6 +7879,22 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(TG3_CPMU_LSPD_10MB_CLK, val);
        }
 
+       /* Enable MAC control of LPI */
+       if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
+               tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
+                      TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+                      TG3_CPMU_EEE_LNKIDL_UART_IDL);
+
+               tw32_f(TG3_CPMU_EEE_CTRL,
+                      TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+               tw32_f(TG3_CPMU_EEE_MODE,
+                      TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+                      TG3_CPMU_EEEMD_LPI_IN_TX |
+                      TG3_CPMU_EEEMD_LPI_IN_RX |
+                      TG3_CPMU_EEEMD_EEE_ENABLE);
+       }
+
        /* This works around an issue with Athlon chipsets on
         * B3 tigon3 silicon.  This bit has no effect on any
         * other revision.  But do not set this on PCI Express
@@ -8549,6 +8703,12 @@ static void tg3_timer(unsigned long __opaque)
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);
 
+               if (tp->setlpicnt && !--tp->setlpicnt) {
+                       u32 val = tr32(TG3_CPMU_EEE_MODE);
+                       tw32(TG3_CPMU_EEE_MODE,
+                            val | TG3_CPMU_EEEMD_LPI_ENABLE);
+               }
+
                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event;
@@ -9647,6 +9807,9 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        if (netif_running(dev)) {
                cmd->speed = tp->link_config.active_speed;
                cmd->duplex = tp->link_config.active_duplex;
+       } else {
+               cmd->speed = SPEED_INVALID;
+               cmd->duplex = DUPLEX_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
@@ -12383,6 +12546,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
                }
        }
 
+       if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+           (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+            tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))
+               tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
@@ -12702,6 +12870,9 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
        case TG3_EEPROM_SB_REVISION_5:
                offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
                break;
+       case TG3_EEPROM_SB_REVISION_6:
+               offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
+               break;
        default:
                return;
        }
index f6b709a3ca322a05eab064ee62e20f3ec8210f5e..8342190df0ff3af8d367305969f0da89b17b6010 100644 (file)
 #define  CPMU_MUTEX_GNT_DRIVER          0x00001000
 #define TG3_CPMU_PHY_STRAP             0x00003664
 #define TG3_CPMU_PHY_STRAP_IS_SERDES    0x00000020
-/* 0x3664 --> 0x3800 unused */
+/* 0x3664 --> 0x36b0 unused */
+
+#define TG3_CPMU_EEE_MODE              0x000036b0
+#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET  0x00000008
+#define TG3_CPMU_EEEMD_LPI_ENABLE       0x00000080
+#define TG3_CPMU_EEEMD_LPI_IN_TX        0x00000100
+#define TG3_CPMU_EEEMD_LPI_IN_RX        0x00000200
+#define TG3_CPMU_EEEMD_EEE_ENABLE       0x00100000
+/* 0x36b4 --> 0x36b8 unused */
+
+#define TG3_CPMU_EEE_LNKIDL_CTRL       0x000036bc
+#define  TG3_CPMU_EEE_LNKIDL_PCIE_NL0   0x01000000
+#define  TG3_CPMU_EEE_LNKIDL_UART_IDL   0x00000004
+/* 0x36c0 --> 0x36d0 unused */
+
+#define TG3_CPMU_EEE_CTRL              0x000036d0
+#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US  0x0000019d
+#define TG3_CPMU_EEE_CTRL_EXIT_36_US    0x00000384
+#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US  0x000001f8
+/* 0x36d4 --> 0x3800 unused */
 
 /* Mbuf cluster free registers */
 #define MBFREE_MODE                    0x00003800
 #define TG3_EEPROM_SB_REVISION_3       0x00030000
 #define TG3_EEPROM_SB_REVISION_4       0x00040000
 #define TG3_EEPROM_SB_REVISION_5       0x00050000
+#define TG3_EEPROM_SB_REVISION_6       0x00060000
 #define TG3_EEPROM_MAGIC_HW            0xabcd
 #define TG3_EEPROM_MAGIC_HW_MSK                0xffff
 
 #define TG3_EEPROM_SB_F1R3_EDH_OFF     0x18
 #define TG3_EEPROM_SB_F1R4_EDH_OFF     0x1c
 #define TG3_EEPROM_SB_F1R5_EDH_OFF     0x20
+#define TG3_EEPROM_SB_F1R6_EDH_OFF     0x4c
 #define TG3_EEPROM_SB_EDH_MAJ_MASK     0x00000700
 #define TG3_EEPROM_SB_EDH_MAJ_SHFT     8
 #define TG3_EEPROM_SB_EDH_MIN_MASK     0x000000ff
 #define  MII_TG3_CTRL_AS_MASTER                0x0800
 #define  MII_TG3_CTRL_ENABLE_AS_MASTER 0x1000
 
+#define MII_TG3_MMD_CTRL               0x0d /* MMD Access Control register */
+#define MII_TG3_MMD_CTRL_DATA_NOINC    0x4000
+#define MII_TG3_MMD_ADDRESS            0x0e /* MMD Address Data register */
+
 #define MII_TG3_EXT_CTRL               0x10 /* Extended control register */
 #define  MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001
 #define  MII_TG3_EXT_CTRL_LNK3_LED_MODE        0x0002
 #define MII_TG3_DSP_TAP1               0x0001
 #define  MII_TG3_DSP_TAP1_AGCTGT_DFLT  0x0007
 #define MII_TG3_DSP_AADJ1CH0           0x001f
+#define MII_TG3_DSP_CH34TP2            0x4022
+#define MII_TG3_DSP_CH34TP2_HIBW01     0x0010
 #define MII_TG3_DSP_AADJ1CH3           0x601f
 #define  MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
 #define MII_TG3_DSP_EXP1_INT_STAT      0x0f01
 #define MII_TG3_TEST1_TRIM_EN          0x0010
 #define MII_TG3_TEST1_CRC_EN           0x8000
 
+/* Clause 45 expansion registers */
+#define TG3_CL45_D7_EEEADV_CAP         0x003c
+#define TG3_CL45_D7_EEEADV_CAP_100TX   0x0002
+#define TG3_CL45_D7_EEEADV_CAP_1000T   0x0004
+#define TG3_CL45_D7_EEERES_STAT                0x803e
+#define TG3_CL45_D7_EEERES_STAT_LP_100TX       0x0002
+#define TG3_CL45_D7_EEERES_STAT_LP_1000T       0x0004
+
 
 /* Fast Ethernet Tranceiver definitions */
 #define MII_TG3_FET_PTEST              0x17
@@ -2986,9 +3021,11 @@ struct tg3 {
 #define TG3_PHYFLG_BER_BUG             0x00008000
 #define TG3_PHYFLG_SERDES_PREEMPHASIS  0x00010000
 #define TG3_PHYFLG_PARALLEL_DETECT     0x00020000
+#define TG3_PHYFLG_EEE_CAP             0x00040000
 
        u32                             led_ctrl;
        u32                             phy_otp;
+       u32                             setlpicnt;
 
 #define TG3_BPN_SIZE                   24
        char                            board_part_number[TG3_BPN_SIZE];
index 6884813b809c642f86775b99d479afa97f8088eb..f1995615dc8459297018e7b517e7c4987c3a8c97 100644 (file)
@@ -312,13 +312,14 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability");
 
 #define MED_LNK_DEF 0
 #define MED_LNK_MIN 0
-#define MED_LNK_MAX 4
+#define MED_LNK_MAX 5
 /* speed_duplex[] is used for setting the speed and duplex mode of NIC.
    0: indicate autonegotiation for both speed and duplex mode
    1: indicate 100Mbps half duplex mode
    2: indicate 100Mbps full duplex mode
    3: indicate 10Mbps half duplex mode
    4: indicate 10Mbps full duplex mode
+   5: indicate 1000Mbps full duplex mode
 
    Note:
    if EEPROM have been set to the force mode, this option is ignored
@@ -617,6 +618,9 @@ static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
        case SPD_DPX_10_HALF:
                status = VELOCITY_SPEED_10;
                break;
+       case SPD_DPX_1000_FULL:
+               status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
+               break;
        }
        vptr->mii_status = status;
        return status;
@@ -922,6 +926,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
        } else {
+               u16 CTRL1000;
                u16 ANAR;
                u8 CHIPGCR;
 
@@ -936,7 +941,11 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
                BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
 
                CHIPGCR = readb(&regs->CHIPGCR);
-               CHIPGCR &= ~CHIPGCR_FCGMII;
+
+               if (mii_status & VELOCITY_SPEED_1000)
+                       CHIPGCR |= CHIPGCR_FCGMII;
+               else
+                       CHIPGCR &= ~CHIPGCR_FCGMII;
 
                if (mii_status & VELOCITY_DUPLEX_FULL) {
                        CHIPGCR |= CHIPGCR_FCFDX;
@@ -952,7 +961,13 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }
 
-               MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
+               velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
+               CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+               if ((mii_status & VELOCITY_SPEED_1000) &&
+                   (mii_status & VELOCITY_DUPLEX_FULL)) {
+                       CTRL1000 |= ADVERTISE_1000FULL;
+               }
+               velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
 
                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
@@ -967,7 +982,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
                                ANAR |= ADVERTISE_100FULL;
                        else
                                ANAR |= ADVERTISE_100HALF;
-               } else {
+               } else if (mii_status & VELOCITY_SPEED_10) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
                                ANAR |= ADVERTISE_10FULL;
                        else
@@ -1013,6 +1028,9 @@ static void velocity_print_link_status(struct velocity_info *vptr)
        } else {
                VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
                switch (vptr->options.spd_dpx) {
+               case SPD_DPX_1000_FULL:
+                       VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
+                       break;
                case SPD_DPX_100_HALF:
                        VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
                        break;
@@ -3170,6 +3188,37 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Half |
                        SUPPORTED_1000baseT_Full;
+
+       cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+       if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+               cmd->advertising |=
+                       ADVERTISED_10baseT_Half |
+                       ADVERTISED_10baseT_Full |
+                       ADVERTISED_100baseT_Half |
+                       ADVERTISED_100baseT_Full |
+                       ADVERTISED_1000baseT_Half |
+                       ADVERTISED_1000baseT_Full;
+       } else {
+               switch (vptr->options.spd_dpx) {
+               case SPD_DPX_1000_FULL:
+                       cmd->advertising |= ADVERTISED_1000baseT_Full;
+                       break;
+               case SPD_DPX_100_HALF:
+                       cmd->advertising |= ADVERTISED_100baseT_Half;
+                       break;
+               case SPD_DPX_100_FULL:
+                       cmd->advertising |= ADVERTISED_100baseT_Full;
+                       break;
+               case SPD_DPX_10_HALF:
+                       cmd->advertising |= ADVERTISED_10baseT_Half;
+                       break;
+               case SPD_DPX_10_FULL:
+                       cmd->advertising |= ADVERTISED_10baseT_Full;
+                       break;
+               default:
+                       break;
+               }
+       }
        if (status & VELOCITY_SPEED_1000)
                cmd->speed = SPEED_1000;
        else if (status & VELOCITY_SPEED_100)
@@ -3200,14 +3249,35 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd
        curr_status &= (~VELOCITY_LINK_FAIL);
 
        new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
+       new_status |= ((cmd->speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
        new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
        new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
        new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
 
-       if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
+       if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
+           (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
                ret = -EINVAL;
-       else
+       } else {
+               enum speed_opt spd_dpx;
+
+               if (new_status & VELOCITY_AUTONEG_ENABLE)
+                       spd_dpx = SPD_DPX_AUTO;
+               else if ((new_status & VELOCITY_SPEED_1000) &&
+                        (new_status & VELOCITY_DUPLEX_FULL)) {
+                       spd_dpx = SPD_DPX_1000_FULL;
+               } else if (new_status & VELOCITY_SPEED_100)
+                       spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
+                               SPD_DPX_100_FULL : SPD_DPX_100_HALF;
+               else if (new_status & VELOCITY_SPEED_10)
+                       spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
+                               SPD_DPX_10_FULL : SPD_DPX_10_HALF;
+               else
+                       return -EOPNOTSUPP;
+
+               vptr->options.spd_dpx = spd_dpx;
+
                velocity_set_media_mode(vptr, new_status);
+       }
 
        return ret;
 }
index b5e120b0074b5d379166e290bca557dcc473101d..aa2e69b9ff61301569cf1a8e1b3d62259ace2894 100644 (file)
@@ -848,7 +848,7 @@ enum  velocity_owner {
  *     Bits in CHIPGCR register
  */
 
-#define CHIPGCR_FCGMII      0x80
+#define CHIPGCR_FCGMII      0x80       /* enable GMII mode */
 #define CHIPGCR_FCFDX       0x40
 #define CHIPGCR_FCRESV      0x20
 #define CHIPGCR_FCMODE      0x10
@@ -1390,7 +1390,8 @@ enum speed_opt {
        SPD_DPX_100_HALF = 1,
        SPD_DPX_100_FULL = 2,
        SPD_DPX_10_HALF = 3,
-       SPD_DPX_10_FULL = 4
+       SPD_DPX_10_FULL = 4,
+       SPD_DPX_1000_FULL = 5
 };
 
 enum velocity_init_type {
index dba28268e651931d8b3094438158d15c10d3ca2c..8e20540043f55dfffc4b2096ceee6ca817e994df 100644 (file)
@@ -12,7 +12,6 @@
 /**
  * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
  * @oscillator_frequency:       - oscillator frequency in Hz
- * @model:                      - actual type of chip
  * @board_specific_setup:       - called before probing the chip (power,reset)
  * @transceiver_enable:         - called to power on/off the transceiver
  * @power_enable:               - called to power on/off the mcp *and* the
@@ -25,9 +24,6 @@
 
 struct mcp251x_platform_data {
        unsigned long oscillator_frequency;
-       int model;
-#define CAN_MCP251X_MCP2510 0x2510
-#define CAN_MCP251X_MCP2515 0x2515
        int (*board_specific_setup)(struct spi_device *spi);
        int (*transceiver_enable)(int enable);
        int (*power_enable) (int enable);
index 50d8009be86c023cbee9607cb728927ba86fb531..79358bb712c6905e474a55b09b1edae58c3a5c43 100644 (file)
@@ -14,7 +14,6 @@
 
 struct netpoll {
        struct net_device *dev;
-       struct net_device *real_dev;
        char dev_name[IFNAMSIZ];
        const char *name;
        void (*rx_hook)(struct netpoll *, int, char *, int);
@@ -53,7 +52,13 @@ void netpoll_set_trap(int trap);
 void __netpoll_cleanup(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
+void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+                            struct net_device *dev);
+static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+{
+       netpoll_send_skb_on_dev(np, skb, np->dev);
+}
+
 
 
 #ifdef CONFIG_NETPOLL
index 0b53c43ac92e0a2db2d202ba28071989332eff4c..05a358f1ba11a3035e37e8e064a2983f6276ba87 100644 (file)
@@ -496,13 +496,13 @@ extern struct sk_buff *__alloc_skb(unsigned int size,
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
 {
-       return __alloc_skb(size, priority, 0, -1);
+       return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
                                               gfp_t priority)
 {
-       return __alloc_skb(size, priority, 1, -1);
+       return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
 }
 
 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
@@ -1563,13 +1563,25 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
        return skb;
 }
 
-extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
+/**
+ *     __netdev_alloc_page - allocate a page for ps-rx on a specific device
+ *     @dev: network device to receive on
+ *     @gfp_mask: alloc_pages_node mask
+ *
+ *     Allocate a new page. dev currently unused.
+ *
+ *     %NULL is returned if there is no free memory.
+ */
+static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
+{
+       return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
+}
 
 /**
  *     netdev_alloc_page - allocate a page for ps-rx on a specific device
  *     @dev: network device to receive on
  *
- *     Allocate a new page node local to the specified device.
+ *     Allocate a new page. dev currently unused.
  *
  *     %NULL is returned if there is no free memory.
  */
index bd10a7908993fc369de73957751588d933a9bb12..65af9a07cf766b3fe0309149cd46fcc6495aaffb 100644 (file)
@@ -41,6 +41,8 @@ struct net {
                                                 * destroy on demand
                                                 */
 #endif
+       spinlock_t              rules_mod_lock;
+
        struct list_head        list;           /* list of network namespaces */
        struct list_head        cleanup_list;   /* namespaces on death row */
        struct list_head        exit_list;      /* Use only net_mutex */
@@ -52,7 +54,8 @@ struct net {
        struct ctl_table_set    sysctls;
 #endif
 
-       struct net_device       *loopback_dev;          /* The loopback */
+       struct sock             *rtnl;                  /* rtnetlink socket */
+       struct sock             *genl_sock;
 
        struct list_head        dev_base_head;
        struct hlist_head       *dev_name_head;
@@ -60,11 +63,9 @@ struct net {
 
        /* core fib_rules */
        struct list_head        rules_ops;
-       spinlock_t              rules_mod_lock;
 
-       struct sock             *rtnl;                  /* rtnetlink socket */
-       struct sock             *genl_sock;
 
+       struct net_device       *loopback_dev;          /* The loopback */
        struct netns_core       core;
        struct netns_mib        mib;
        struct netns_packet     packet;
@@ -84,13 +85,15 @@ struct net {
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
 #endif
-#ifdef CONFIG_XFRM
-       struct netns_xfrm       xfrm;
-#endif
 #ifdef CONFIG_WEXT_CORE
        struct sk_buff_head     wext_nlevents;
 #endif
        struct net_generic      *gen;
+
+       /* Note : following structs are cache line aligned */
+#ifdef CONFIG_XFRM
+       struct netns_xfrm       xfrm;
+#endif
 };
 
 
index 74f119a2829a2fe8749e28fdccd99822e58e76dc..748f91f87cd573783efcf59a615292a86b2c289e 100644 (file)
@@ -43,10 +43,6 @@ struct netns_xfrm {
        unsigned int            policy_count[XFRM_POLICY_MAX * 2];
        struct work_struct      policy_hash_work;
 
-       struct dst_ops          xfrm4_dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-       struct dst_ops          xfrm6_dst_ops;
-#endif
 
        struct sock             *nlsk;
        struct sock             *nlsk_stash;
@@ -58,6 +54,11 @@ struct netns_xfrm {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_hdr;
 #endif
+
+       struct dst_ops          xfrm4_dst_ops;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+       struct dst_ops          xfrm6_dst_ops;
+#endif
 };
 
 #endif
index 15af6dca0b493f982f6de117402a19f04c7b03a1..1e0645e1eed22c9b26156c6f285b68f1be38256e 100644 (file)
@@ -50,8 +50,6 @@
  * TIPC operating mode routines
  */
 
-u32 tipc_get_addr(void);
-
 #define TIPC_NOT_RUNNING  0
 #define TIPC_NODE_MODE    1
 #define TIPC_NET_MODE     2
@@ -62,8 +60,6 @@ int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle);
 
 void tipc_detach(unsigned int userref);
 
-int tipc_get_mode(void);
-
 /*
  * TIPC port manipulation routines
  */
@@ -153,12 +149,6 @@ int tipc_disconnect(u32 portref);
 
 int tipc_shutdown(u32 ref);
 
-int tipc_isconnected(u32 portref, int *isconnected);
-
-int tipc_peer(u32 portref, struct tipc_portid *peer);
-
-int tipc_ref_valid(u32 portref); 
-
 /*
  * TIPC messaging routines
  */
@@ -170,38 +160,12 @@ int tipc_send(u32 portref,
              unsigned int num_sect,
              struct iovec const *msg_sect);
 
-int tipc_send_buf(u32 portref,
-                 struct sk_buff *buf,
-                 unsigned int dsz);
-
 int tipc_send2name(u32 portref, 
                   struct tipc_name const *name, 
                   u32 domain,
                   unsigned int num_sect,
                   struct iovec const *msg_sect);
 
-int tipc_send_buf2name(u32 portref,
-                      struct tipc_name const *name,
-                      u32 domain,
-                      struct sk_buff *buf,
-                      unsigned int dsz);
-
-int tipc_forward2name(u32 portref, 
-                     struct tipc_name const *name, 
-                     u32 domain,
-                     unsigned int section_count,
-                     struct iovec const *msg_sect,
-                     struct tipc_portid const *origin,
-                     unsigned int importance);
-
-int tipc_forward_buf2name(u32 portref,
-                         struct tipc_name const *name,
-                         u32 domain,
-                         struct sk_buff *buf,
-                         unsigned int dsz,
-                         struct tipc_portid const *orig,
-                         unsigned int importance);
-
 int tipc_send2port(u32 portref,
                   struct tipc_portid const *dest,
                   unsigned int num_sect,
@@ -212,46 +176,11 @@ int tipc_send_buf2port(u32 portref,
                       struct sk_buff *buf,
                       unsigned int dsz);
 
-int tipc_forward2port(u32 portref,
-                     struct tipc_portid const *dest,
-                     unsigned int num_sect,
-                     struct iovec const *msg_sect,
-                     struct tipc_portid const *origin,
-                     unsigned int importance);
-
-int tipc_forward_buf2port(u32 portref,
-                         struct tipc_portid const *dest,
-                         struct sk_buff *buf,
-                         unsigned int dsz,
-                         struct tipc_portid const *orig,
-                         unsigned int importance);
-
 int tipc_multicast(u32 portref, 
                   struct tipc_name_seq const *seq, 
                   u32 domain,  /* currently unused */
                   unsigned int section_count,
                   struct iovec const *msg);
-
-#if 0
-int tipc_multicast_buf(u32 portref, 
-                      struct tipc_name_seq const *seq, 
-                      u32 domain,
-                      void *buf,
-                      unsigned int size);
-#endif
-
-/*
- * TIPC subscription routines
- */
-
-int tipc_ispublished(struct tipc_name const *name);
-
-/*
- * Get number of available nodes within specified domain (excluding own node)
- */
-
-unsigned int tipc_available_nodes(const u32 domain);
-
 #endif
 
 #endif
index c54917cbfa48fb3ea792db913c51b7fcc823672a..1893aaf49426cc27ac95d292babc27d5c0398e2c 100644 (file)
@@ -88,8 +88,6 @@ void tipc_acknowledge(u32 port_ref,u32 ack);
 
 struct tipc_port *tipc_get_port(const u32 ref);
 
-void *tipc_get_handle(const u32 ref);
-
 /*
  * The following routines require that the port be locked on entry
  */
index 21698f8c49ee7a7c0590fc38ab28e4ad7afaf1fd..1bc3f253ba6c76efe7e8b97c78c2e6a72d771b23 100644 (file)
@@ -494,7 +494,6 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                        }
                }
 
-               synchronize_rcu();
                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
                                   NETLINK_CB(skb).pid);
                fib_rule_put(rule);
index 537e01afd81baf1e9bc7269e0c97ca3fb3844ffe..4e98ffac3af0259bda8cbadfb9609fa6b57405a8 100644 (file)
@@ -288,11 +288,11 @@ static int netpoll_owner_active(struct net_device *dev)
        return 0;
 }
 
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+                            struct net_device *dev)
 {
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
-       struct net_device *dev = np->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo = np->dev->npinfo;
@@ -346,7 +346,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                schedule_delayed_work(&npinfo->tx_work,0);
        }
 }
-EXPORT_SYMBOL(netpoll_send_skb);
+EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
index 752c1972b3a79eb76f83f4906a7c9355622c8b08..4e8b82e167d838a7eb6d5be6784fc0946a99f699 100644 (file)
@@ -247,10 +247,9 @@ EXPORT_SYMBOL(__alloc_skb);
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                unsigned int length, gfp_t gfp_mask)
 {
-       int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
        struct sk_buff *skb;
 
-       skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
+       skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
        if (likely(skb)) {
                skb_reserve(skb, NET_SKB_PAD);
                skb->dev = dev;
@@ -259,16 +258,6 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 }
 EXPORT_SYMBOL(__netdev_alloc_skb);
 
-struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
-{
-       int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
-       struct page *page;
-
-       page = alloc_pages_node(node, gfp_mask, 0);
-       return page;
-}
-EXPORT_SYMBOL(__netdev_alloc_page);
-
 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                int size)
 {
index c2ff48fa18c723ff110b4ada4e49461225ee8098..dc94b0316b783fd1c1985e407fb0339782c66df5 100644 (file)
@@ -403,6 +403,9 @@ static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
        return inet_insert_ifa(ifa);
 }
 
+/* Caller must hold RCU or RTNL :
+ * We dont take a reference on found in_device
+ */
 struct in_device *inetdev_by_index(struct net *net, int ifindex)
 {
        struct net_device *dev;
@@ -411,7 +414,7 @@ struct in_device *inetdev_by_index(struct net *net, int ifindex)
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
-               in_dev = in_dev_get(dev);
+               in_dev = rcu_dereference_rtnl(dev->ip_ptr);
        rcu_read_unlock();
        return in_dev;
 }
@@ -453,8 +456,6 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
                goto errout;
        }
 
-       __in_dev_put(in_dev);
-
        for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
             ifap = &ifa->ifa_next) {
                if (tb[IFA_LOCAL] &&
index 919f2ad19b4973eecfec44b39a9c6376e751ecae..36e27c2107de9f8286e7848c9b0dfe3560ab2907 100644 (file)
@@ -153,7 +153,7 @@ static void fib_flush(struct net *net)
  * @addr: the source address
  * @devref: if true, take a reference on the found device
  *
- * If a caller uses devref=false, it should be protected by RCU
+ * If a caller uses devref=false, it should be protected by RCU, or RTNL
  */
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 {
@@ -1016,16 +1016,15 @@ static struct notifier_block fib_netdev_notifier = {
 static int __net_init ip_fib_net_init(struct net *net)
 {
        int err;
-       unsigned int i;
+       size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;
+
+       /* Avoid false sharing : Use at least a full cache line */
+       size = max_t(size_t, size, L1_CACHE_BYTES);
 
-       net->ipv4.fib_table_hash = kzalloc(
-                       sizeof(struct hlist_head)*FIB_TABLE_HASHSZ, GFP_KERNEL);
+       net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
        if (net->ipv4.fib_table_hash == NULL)
                return -ENOMEM;
 
-       for (i = 0; i < FIB_TABLE_HASHSZ; i++)
-               INIT_HLIST_HEAD(&net->ipv4.fib_table_hash[i]);
-
        err = fib4_rules_init(net);
        if (err < 0)
                goto fail;
index 83cca68e259c420b8c84c1a520e3c9b3fbe31c5c..4f1aafd3ba89f92a65d01488a37591c35b0f669c 100644 (file)
@@ -54,36 +54,37 @@ struct fib_node {
        struct fib_alias        fn_embedded_alias;
 };
 
-struct fn_zone {
-       struct fn_zone          *fz_next;       /* Next not empty zone  */
-       struct hlist_head       *fz_hash;       /* Hash table pointer   */
-       int                     fz_nent;        /* Number of entries    */
+#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))
 
-       int                     fz_divisor;     /* Hash divisor         */
+struct fn_zone {
+       struct fn_zone __rcu    *fz_next;       /* Next not empty zone  */
+       struct hlist_head __rcu *fz_hash;       /* Hash table pointer   */
+       seqlock_t               fz_lock;
        u32                     fz_hashmask;    /* (fz_divisor - 1)     */
-#define FZ_HASHMASK(fz)                ((fz)->fz_hashmask)
 
-       int                     fz_order;       /* Zone order           */
-       __be32                  fz_mask;
+       u8                      fz_order;       /* Zone order (0..32)   */
+       u8                      fz_revorder;    /* 32 - fz_order        */
+       __be32                  fz_mask;        /* inet_make_mask(order) */
 #define FZ_MASK(fz)            ((fz)->fz_mask)
-};
 
-/* NOTE. On fast computers evaluation of fz_hashmask and fz_mask
- * can be cheaper than memory lookup, so that FZ_* macros are used.
- */
+       struct hlist_head       fz_embedded_hash[EMBEDDED_HASH_SIZE];
+
+       int                     fz_nent;        /* Number of entries    */
+       int                     fz_divisor;     /* Hash size (mask+1)   */
+};
 
 struct fn_hash {
-       struct fn_zone  *fn_zones[33];
-       struct fn_zone  *fn_zone_list;
+       struct fn_zone          *fn_zones[33];
+       struct fn_zone __rcu    *fn_zone_list;
 };
 
 static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
 {
-       u32 h = ntohl(key)>>(32 - fz->fz_order);
+       u32 h = ntohl(key) >> fz->fz_revorder;
        h ^= (h>>20);
        h ^= (h>>10);
        h ^= (h>>5);
-       h &= FZ_HASHMASK(fz);
+       h &= fz->fz_hashmask;
        return h;
 }
 
@@ -92,7 +93,6 @@ static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
        return dst & FZ_MASK(fz);
 }
 
-static DEFINE_RWLOCK(fib_hash_lock);
 static unsigned int fib_hash_genid;
 
 #define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
@@ -101,12 +101,11 @@ static struct hlist_head *fz_hash_alloc(int divisor)
 {
        unsigned long size = divisor * sizeof(struct hlist_head);
 
-       if (size <= PAGE_SIZE) {
+       if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
-       } else {
-               return (struct hlist_head *)
-                       __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
-       }
+
+       return (struct hlist_head *)
+               __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
 }
 
 /* The fib hash lock must be held when this is called. */
@@ -121,12 +120,12 @@ static inline void fn_rebuild_zone(struct fn_zone *fz,
                struct fib_node *f;
 
                hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
-                       struct hlist_head *new_head;
+                       struct hlist_head __rcu *new_head;
 
-                       hlist_del(&f->fn_hash);
+                       hlist_del_rcu(&f->fn_hash);
 
                        new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
-                       hlist_add_head(&f->fn_hash, new_head);
+                       hlist_add_head_rcu(&f->fn_hash, new_head);
                }
        }
 }
@@ -147,14 +146,14 @@ static void fn_rehash_zone(struct fn_zone *fz)
        int old_divisor, new_divisor;
        u32 new_hashmask;
 
-       old_divisor = fz->fz_divisor;
+       new_divisor = old_divisor = fz->fz_divisor;
 
        switch (old_divisor) {
-       case 16:
-               new_divisor = 256;
+       case EMBEDDED_HASH_SIZE:
+               new_divisor *= EMBEDDED_HASH_SIZE;
                break;
-       case 256:
-               new_divisor = 1024;
+       case EMBEDDED_HASH_SIZE*EMBEDDED_HASH_SIZE:
+               new_divisor *= (EMBEDDED_HASH_SIZE/2);
                break;
        default:
                if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
@@ -175,31 +174,55 @@ static void fn_rehash_zone(struct fn_zone *fz)
        ht = fz_hash_alloc(new_divisor);
 
        if (ht) {
-               write_lock_bh(&fib_hash_lock);
+               struct fn_zone nfz;
+
+               memcpy(&nfz, fz, sizeof(nfz));
+
+               write_seqlock_bh(&fz->fz_lock);
                old_ht = fz->fz_hash;
-               fz->fz_hash = ht;
+               nfz.fz_hash = ht;
+               nfz.fz_hashmask = new_hashmask;
+               nfz.fz_divisor = new_divisor;
+               fn_rebuild_zone(&nfz, old_ht, old_divisor);
+               fib_hash_genid++;
+               rcu_assign_pointer(fz->fz_hash, ht);
                fz->fz_hashmask = new_hashmask;
                fz->fz_divisor = new_divisor;
-               fn_rebuild_zone(fz, old_ht, old_divisor);
-               fib_hash_genid++;
-               write_unlock_bh(&fib_hash_lock);
+               write_sequnlock_bh(&fz->fz_lock);
 
-               fz_hash_free(old_ht, old_divisor);
+               if (old_ht != fz->fz_embedded_hash) {
+                       synchronize_rcu();
+                       fz_hash_free(old_ht, old_divisor);
+               }
        }
 }
 
-static inline void fn_free_node(struct fib_node * f)
+static void fn_free_node_rcu(struct rcu_head *head)
 {
+       struct fib_node *f = container_of(head, struct fib_node, fn_embedded_alias.rcu);
+
        kmem_cache_free(fn_hash_kmem, f);
 }
 
+static inline void fn_free_node(struct fib_node *f)
+{
+       call_rcu(&f->fn_embedded_alias.rcu, fn_free_node_rcu);
+}
+
+static void fn_free_alias_rcu(struct rcu_head *head)
+{
+       struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
+
+       kmem_cache_free(fn_alias_kmem, fa);
+}
+
 static inline void fn_free_alias(struct fib_alias *fa, struct fib_node *f)
 {
        fib_release_info(fa->fa_info);
        if (fa == &f->fn_embedded_alias)
                fa->fa_info = NULL;
        else
-               kmem_cache_free(fn_alias_kmem, fa);
+               call_rcu(&fa->rcu, fn_free_alias_rcu);
 }
 
 static struct fn_zone *
@@ -210,36 +233,30 @@ fn_new_zone(struct fn_hash *table, int z)
        if (!fz)
                return NULL;
 
-       if (z) {
-               fz->fz_divisor = 16;
-       } else {
-               fz->fz_divisor = 1;
-       }
-       fz->fz_hashmask = (fz->fz_divisor - 1);
-       fz->fz_hash = fz_hash_alloc(fz->fz_divisor);
-       if (!fz->fz_hash) {
-               kfree(fz);
-               return NULL;
-       }
+       seqlock_init(&fz->fz_lock);
+       fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
+       fz->fz_hashmask = fz->fz_divisor - 1;
+       fz->fz_hash = fz->fz_embedded_hash;
        fz->fz_order = z;
+       fz->fz_revorder = 32 - z;
        fz->fz_mask = inet_make_mask(z);
 
        /* Find the first not empty zone with more specific mask */
-       for (i=z+1; i<=32; i++)
+       for (i = z + 1; i <= 32; i++)
                if (table->fn_zones[i])
                        break;
-       write_lock_bh(&fib_hash_lock);
-       if (i>32) {
+       if (i > 32) {
                /* No more specific masks, we are the first. */
-               fz->fz_next = table->fn_zone_list;
-               table->fn_zone_list = fz;
+               rcu_assign_pointer(fz->fz_next,
+                                  rtnl_dereference(table->fn_zone_list));
+               rcu_assign_pointer(table->fn_zone_list, fz);
        } else {
-               fz->fz_next = table->fn_zones[i]->fz_next;
-               table->fn_zones[i]->fz_next = fz;
+               rcu_assign_pointer(fz->fz_next,
+                                  rtnl_dereference(table->fn_zones[i]->fz_next));
+               rcu_assign_pointer(table->fn_zones[i]->fz_next, fz);
        }
        table->fn_zones[z] = fz;
        fib_hash_genid++;
-       write_unlock_bh(&fib_hash_lock);
        return fz;
 }
 
@@ -251,28 +268,36 @@ int fib_table_lookup(struct fib_table *tb,
        struct fn_zone *fz;
        struct fn_hash *t = (struct fn_hash *)tb->tb_data;
 
-       read_lock(&fib_hash_lock);
-       for (fz = t->fn_zone_list; fz; fz = fz->fz_next) {
-               struct hlist_head *head;
+       rcu_read_lock();
+       for (fz = rcu_dereference(t->fn_zone_list);
+            fz != NULL;
+            fz = rcu_dereference(fz->fz_next)) {
+               struct hlist_head __rcu *head;
                struct hlist_node *node;
                struct fib_node *f;
-               __be32 k = fz_key(flp->fl4_dst, fz);
+               __be32 k;
+               unsigned int seq;
 
-               head = &fz->fz_hash[fn_hash(k, fz)];
-               hlist_for_each_entry(f, node, head, fn_hash) {
-                       if (f->fn_key != k)
-                               continue;
+               do {
+                       seq = read_seqbegin(&fz->fz_lock);
+                       k = fz_key(flp->fl4_dst, fz);
 
-                       err = fib_semantic_match(&f->fn_alias,
+                       head = &fz->fz_hash[fn_hash(k, fz)];
+                       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
+                               if (f->fn_key != k)
+                                       continue;
+
+                               err = fib_semantic_match(&f->fn_alias,
                                                 flp, res,
                                                 fz->fz_order, fib_flags);
-                       if (err <= 0)
-                               goto out;
-               }
+                               if (err <= 0)
+                                       goto out;
+                       }
+               } while (read_seqretry(&fz->fz_lock, seq));
        }
        err = 1;
 out:
-       read_unlock(&fib_hash_lock);
+       rcu_read_unlock();
        return err;
 }
 
@@ -294,11 +319,11 @@ void fib_table_select_default(struct fib_table *tb,
        last_resort = NULL;
        order = -1;
 
-       read_lock(&fib_hash_lock);
-       hlist_for_each_entry(f, node, &fz->fz_hash[0], fn_hash) {
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) {
                struct fib_alias *fa;
 
-               list_for_each_entry(fa, &f->fn_alias, fa_list) {
+               list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
                        struct fib_info *next_fi = fa->fa_info;
 
                        if (fa->fa_scope != res->scope ||
@@ -342,7 +367,7 @@ void fib_table_select_default(struct fib_table *tb,
                fib_result_assign(res, last_resort);
        tb->tb_default = last_idx;
 out:
-       read_unlock(&fib_hash_lock);
+       rcu_read_unlock();
 }
 
 /* Insert node F to FZ. */
@@ -350,7 +375,7 @@ static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
 {
        struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
 
-       hlist_add_head(&f->fn_hash, head);
+       hlist_add_head_rcu(&f->fn_hash, head);
 }
 
 /* Return the node in FZ matching KEY. */
@@ -360,7 +385,7 @@ static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
        struct hlist_node *node;
        struct fib_node *f;
 
-       hlist_for_each_entry(f, node, head, fn_hash) {
+       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
                if (f->fn_key == key)
                        return f;
        }
@@ -368,6 +393,17 @@ static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
        return NULL;
 }
 
+
+static struct fib_alias *fib_fast_alloc(struct fib_node *f)
+{
+       struct fib_alias *fa = &f->fn_embedded_alias;
+
+       if (fa->fa_info != NULL)
+               fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
+       return fa;
+}
+
+/* Caller must hold RTNL. */
 int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 {
        struct fn_hash *table = (struct fn_hash *) tb->tb_data;
@@ -452,7 +488,6 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                }
 
                if (cfg->fc_nlflags & NLM_F_REPLACE) {
-                       struct fib_info *fi_drop;
                        u8 state;
 
                        fa = fa_first;
@@ -461,21 +496,25 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                                        err = 0;
                                goto out;
                        }
-                       write_lock_bh(&fib_hash_lock);
-                       fi_drop = fa->fa_info;
-                       fa->fa_info = fi;
-                       fa->fa_type = cfg->fc_type;
-                       fa->fa_scope = cfg->fc_scope;
+                       err = -ENOBUFS;
+                       new_fa = fib_fast_alloc(f);
+                       if (new_fa == NULL)
+                               goto out;
+
+                       new_fa->fa_tos = fa->fa_tos;
+                       new_fa->fa_info = fi;
+                       new_fa->fa_type = cfg->fc_type;
+                       new_fa->fa_scope = cfg->fc_scope;
                        state = fa->fa_state;
-                       fa->fa_state &= ~FA_S_ACCESSED;
+                       new_fa->fa_state = state & ~FA_S_ACCESSED;
                        fib_hash_genid++;
-                       write_unlock_bh(&fib_hash_lock);
+                       list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
 
-                       fib_release_info(fi_drop);
+                       fn_free_alias(fa, f);
                        if (state & FA_S_ACCESSED)
                                rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-                       rtmsg_fib(RTM_NEWROUTE, key, fa, cfg->fc_dst_len, tb->tb_id,
-                                 &cfg->fc_nlinfo, NLM_F_REPLACE);
+                       rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len,
+                                 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
                        return 0;
                }
 
@@ -507,12 +546,10 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                f = new_f;
        }
 
-       new_fa = &f->fn_embedded_alias;
-       if (new_fa->fa_info != NULL) {
-               new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-               if (new_fa == NULL)
-                       goto out;
-       }
+       new_fa = fib_fast_alloc(f);
+       if (new_fa == NULL)
+               goto out;
+
        new_fa->fa_info = fi;
        new_fa->fa_tos = tos;
        new_fa->fa_type = cfg->fc_type;
@@ -523,13 +560,11 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
         * Insert new entry to the list.
         */
 
-       write_lock_bh(&fib_hash_lock);
        if (new_f)
                fib_insert_node(fz, new_f);
-       list_add_tail(&new_fa->fa_list,
+       list_add_tail_rcu(&new_fa->fa_list,
                 (fa ? &fa->fa_list : &f->fn_alias));
        fib_hash_genid++;
-       write_unlock_bh(&fib_hash_lock);
 
        if (new_f)
                fz->fz_nent++;
@@ -604,14 +639,12 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
                          tb->tb_id, &cfg->fc_nlinfo, 0);
 
                kill_fn = 0;
-               write_lock_bh(&fib_hash_lock);
-               list_del(&fa->fa_list);
+               list_del_rcu(&fa->fa_list);
                if (list_empty(&f->fn_alias)) {
-                       hlist_del(&f->fn_hash);
+                       hlist_del_rcu(&f->fn_hash);
                        kill_fn = 1;
                }
                fib_hash_genid++;
-               write_unlock_bh(&fib_hash_lock);
 
                if (fa->fa_state & FA_S_ACCESSED)
                        rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
@@ -642,14 +675,12 @@ static int fn_flush_list(struct fn_zone *fz, int idx)
                        struct fib_info *fi = fa->fa_info;
 
                        if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
-                               write_lock_bh(&fib_hash_lock);
-                               list_del(&fa->fa_list);
+                               list_del_rcu(&fa->fa_list);
                                if (list_empty(&f->fn_alias)) {
-                                       hlist_del(&f->fn_hash);
+                                       hlist_del_rcu(&f->fn_hash);
                                        kill_f = 1;
                                }
                                fib_hash_genid++;
-                               write_unlock_bh(&fib_hash_lock);
 
                                fn_free_alias(fa, f);
                                found++;
@@ -663,13 +694,16 @@ static int fn_flush_list(struct fn_zone *fz, int idx)
        return found;
 }
 
+/* caller must hold RTNL. */
 int fib_table_flush(struct fib_table *tb)
 {
        struct fn_hash *table = (struct fn_hash *) tb->tb_data;
        struct fn_zone *fz;
        int found = 0;
 
-       for (fz = table->fn_zone_list; fz; fz = fz->fz_next) {
+       for (fz = rtnl_dereference(table->fn_zone_list);
+            fz != NULL;
+            fz = rtnl_dereference(fz->fz_next)) {
                int i;
 
                for (i = fz->fz_divisor - 1; i >= 0; i--)
@@ -691,10 +725,10 @@ fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
 
        s_i = cb->args[4];
        i = 0;
-       hlist_for_each_entry(f, node, head, fn_hash) {
+       hlist_for_each_entry_rcu(f, node, head, fn_hash) {
                struct fib_alias *fa;
 
-               list_for_each_entry(fa, &f->fn_alias, fa_list) {
+               list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
                        if (i < s_i)
                                goto next;
 
@@ -712,7 +746,7 @@ fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
                                cb->args[4] = i;
                                return -1;
                        }
-               next:
+next:
                        i++;
                }
        }
@@ -747,23 +781,26 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
 int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
                   struct netlink_callback *cb)
 {
-       int m, s_m;
+       int m = 0, s_m;
        struct fn_zone *fz;
        struct fn_hash *table = (struct fn_hash *)tb->tb_data;
 
        s_m = cb->args[2];
-       read_lock(&fib_hash_lock);
-       for (fz = table->fn_zone_list, m=0; fz; fz = fz->fz_next, m++) {
-               if (m < s_m) continue;
+       rcu_read_lock();
+       for (fz = rcu_dereference(table->fn_zone_list);
+            fz != NULL;
+            fz = rcu_dereference(fz->fz_next), m++) {
+               if (m < s_m)
+                       continue;
                if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
                        cb->args[2] = m;
-                       read_unlock(&fib_hash_lock);
+                       rcu_read_unlock();
                        return -1;
                }
                memset(&cb->args[3], 0,
                       sizeof(cb->args) - 3*sizeof(cb->args[0]));
        }
-       read_unlock(&fib_hash_lock);
+       rcu_read_unlock();
        cb->args[2] = m;
        return skb->len;
 }
@@ -826,8 +863,9 @@ static struct fib_alias *fib_get_first(struct seq_file *seq)
        iter->genid     = fib_hash_genid;
        iter->valid     = 1;
 
-       for (iter->zone = table->fn_zone_list; iter->zone;
-            iter->zone = iter->zone->fz_next) {
+       for (iter->zone = rcu_dereference(table->fn_zone_list);
+            iter->zone != NULL;
+            iter->zone = rcu_dereference(iter->zone->fz_next)) {
                int maxslot;
 
                if (!iter->zone->fz_nent)
@@ -912,7 +950,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)
                        }
                }
 
-               iter->zone = iter->zone->fz_next;
+               iter->zone = rcu_dereference(iter->zone->fz_next);
 
                if (!iter->zone)
                        goto out;
@@ -951,11 +989,11 @@ static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(fib_hash_lock)
+       __acquires(RCU)
 {
        void *v = NULL;
 
-       read_lock(&fib_hash_lock);
+       rcu_read_lock();
        if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
                v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
        return v;
@@ -968,15 +1006,16 @@ static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void fib_seq_stop(struct seq_file *seq, void *v)
-       __releases(fib_hash_lock)
+       __releases(RCU)
 {
-       read_unlock(&fib_hash_lock);
+       rcu_read_unlock();
 }
 
 static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
 {
        static const unsigned type2flags[RTN_MAX + 1] = {
-               [7] = RTF_REJECT, [8] = RTF_REJECT,
+               [7] = RTF_REJECT,
+               [8] = RTF_REJECT,
        };
        unsigned flags = type2flags[type];
 
index b9c9a9f2aee54f50f14f7594f119f46b3631fe3e..5072d8effd5d21a75a6b63494f3630bfb48de921 100644 (file)
@@ -12,9 +12,7 @@ struct fib_alias {
        u8                      fa_type;
        u8                      fa_scope;
        u8                      fa_state;
-#ifdef CONFIG_IP_FIB_TRIE
        struct rcu_head         rcu;
-#endif
 };
 
 #define FA_S_ACCESSED  0x01
index 0f80dfc2f7fb49a4336329b48935aa937669351e..6734c9cab24852330a838b3154314bafb3e03a6c 100644 (file)
@@ -590,32 +590,29 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                if (!dev)
                        goto out;
                dev_hold(dev);
-               err = -ENETDOWN;
-               if (!(dev->flags & IFF_UP))
-                       goto out;
-               err = 0;
-out:
-               rcu_read_unlock();
-               return err;
+               err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
        } else {
                struct in_device *in_dev;
 
                if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
                        return -EINVAL;
 
+               rcu_read_lock();
+               err = -ENODEV;
                in_dev = inetdev_by_index(net, nh->nh_oif);
                if (in_dev == NULL)
-                       return -ENODEV;
-               if (!(in_dev->dev->flags & IFF_UP)) {
-                       in_dev_put(in_dev);
-                       return -ENETDOWN;
-               }
+                       goto out;
+               err = -ENETDOWN;
+               if (!(in_dev->dev->flags & IFF_UP))
+                       goto out;
                nh->nh_dev = in_dev->dev;
                dev_hold(nh->nh_dev);
                nh->nh_scope = RT_SCOPE_HOST;
-               in_dev_put(in_dev);
+               err = 0;
        }
-       return 0;
+out:
+       rcu_read_unlock();
+       return err;
 }
 
 static inline unsigned int fib_laddr_hashfn(__be32 val)
index 271c89bdf049706dbc8288457c308f437a427205..31494f33568638a4d540a90bc5541ee3977f9911 100644 (file)
@@ -1384,8 +1384,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
        t_key cindex = 0;
        int current_prefix_length = KEYLENGTH;
        struct tnode *cn;
-       t_key node_prefix, key_prefix, pref_mismatch;
-       int mp;
+       t_key pref_mismatch;
 
        rcu_read_lock();
 
@@ -1500,10 +1499,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
                 * matching prefix.
                 */
 
-               node_prefix = mask_pfx(cn->key, cn->pos);
-               key_prefix = mask_pfx(key, cn->pos);
-               pref_mismatch = key_prefix^node_prefix;
-               mp = 0;
+               pref_mismatch = mask_pfx(cn->key ^ key, cn->pos);
 
                /*
                 * In short: If skipped bits in this node do not match
@@ -1511,13 +1507,9 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
                 * state.directly.
                 */
                if (pref_mismatch) {
-                       while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
-                               mp++;
-                               pref_mismatch = pref_mismatch << 1;
-                       }
-                       key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
+                       int mp = KEYLENGTH - fls(pref_mismatch);
 
-                       if (key_prefix != 0)
+                       if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
                                goto backtrace;
 
                        if (current_prefix_length >= cn->pos)
index 25f339672b2891347ffed75adc3d91fb6486598b..c8877c6c72164ccaee2af4def0025f8300bc7e80 100644 (file)
@@ -1418,6 +1418,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
        write_unlock_bh(&in_dev->mc_list_lock);
 }
 
+/* RTNL is locked */
 static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
        struct flowi fl = { .nl_u = { .ip4_u =
@@ -1428,15 +1429,12 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 
        if (imr->imr_ifindex) {
                idev = inetdev_by_index(net, imr->imr_ifindex);
-               if (idev)
-                       __in_dev_put(idev);
                return idev;
        }
        if (imr->imr_address.s_addr) {
-               dev = ip_dev_find(net, imr->imr_address.s_addr);
+               dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
                if (!dev)
                        return NULL;
-               dev_put(dev);
        }
 
        if (!dev && !ip_route_output_key(net, &rt, &fl)) {
index 9d421f4cf3efbf41f52d1bd9d0a43c5b6aa2313d..d0ffcbe369b76b4a000c5f5e23b6e61dc64fb3dd 100644 (file)
@@ -1245,10 +1245,8 @@ static int ipgre_close(struct net_device *dev)
        if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
                struct in_device *in_dev;
                in_dev = inetdev_by_index(dev_net(dev), t->mlink);
-               if (in_dev) {
+               if (in_dev)
                        ip_mc_dec_group(in_dev, t->parms.iph.daddr);
-                       in_dev_put(in_dev);
-               }
        }
        return 0;
 }
index 0755aa4af86ca8c6cd2390c371c04f749c2a204f..ff98983d2a45465dade3905ce877430d91150d80 100644 (file)
@@ -2124,7 +2124,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
            ipv4_is_loopback(saddr))
                goto martian_source;
 
-       if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
+       if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
                goto brd_input;
 
        /* Accept zero addresses only to limited broadcast;
@@ -2133,8 +2133,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        if (ipv4_is_zeronet(saddr))
                goto martian_source;
 
-       if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
-           ipv4_is_loopback(daddr))
+       if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
                goto martian_destination;
 
        /*
@@ -2367,11 +2366,11 @@ static int __mkroute_output(struct rtable **result,
        if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
                return -EINVAL;
 
-       if (fl->fl4_dst == htonl(0xFFFFFFFF))
+       if (ipv4_is_lbcast(fl->fl4_dst))
                res->type = RTN_BROADCAST;
        else if (ipv4_is_multicast(fl->fl4_dst))
                res->type = RTN_MULTICAST;
-       else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
+       else if (ipv4_is_zeronet(fl->fl4_dst))
                return -EINVAL;
 
        if (dev_out->flags & IFF_LOOPBACK)
@@ -2530,7 +2529,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 
                if (oldflp->oif == 0 &&
                    (ipv4_is_multicast(oldflp->fl4_dst) ||
-                    oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
+                    ipv4_is_lbcast(oldflp->fl4_dst))) {
                        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
                        dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
                        if (dev_out == NULL)
@@ -2574,7 +2573,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
                        goto out;       /* Wrong error code */
 
                if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
-                   oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
+                   ipv4_is_lbcast(oldflp->fl4_dst)) {
                        if (!fl.fl4_src)
                                fl.fl4_src = inet_select_addr(dev_out, 0,
                                                              RT_SCOPE_LINK);
index e4fbdae066d5f3a41d874646edfb62f630541e12..ee0df48174989093e24862f61ae316d573cb1703 100644 (file)
@@ -2495,7 +2495,7 @@ static void tcp_timeout_skbs(struct sock *sk)
 /* Mark head of queue up as lost. With RFC3517 SACK, the packets is
  * is against sacked "cnt", otherwise it's against facked "cnt"
  */
-static void tcp_mark_head_lost(struct sock *sk, int packets)
+static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -2503,13 +2503,13 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
        int err;
        unsigned int mss;
 
-       if (packets == 0)
-               return;
-
        WARN_ON(packets > tp->packets_out);
        if (tp->lost_skb_hint) {
                skb = tp->lost_skb_hint;
                cnt = tp->lost_cnt_hint;
+               /* Head already handled? */
+               if (mark_head && skb != tcp_write_queue_head(sk))
+                       return;
        } else {
                skb = tcp_write_queue_head(sk);
                cnt = 0;
@@ -2544,6 +2544,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
                }
 
                tcp_skb_mark_lost(tp, skb);
+
+               if (mark_head)
+                       break;
        }
        tcp_verify_left_out(tp);
 }
@@ -2555,17 +2558,18 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_is_reno(tp)) {
-               tcp_mark_head_lost(sk, 1);
+               tcp_mark_head_lost(sk, 1, 1);
        } else if (tcp_is_fack(tp)) {
                int lost = tp->fackets_out - tp->reordering;
                if (lost <= 0)
                        lost = 1;
-               tcp_mark_head_lost(sk, lost);
+               tcp_mark_head_lost(sk, lost, 0);
        } else {
                int sacked_upto = tp->sacked_out - tp->reordering;
-               if (sacked_upto < fast_rexmit)
-                       sacked_upto = fast_rexmit;
-               tcp_mark_head_lost(sk, sacked_upto);
+               if (sacked_upto >= 0)
+                       tcp_mark_head_lost(sk, sacked_upto, 0);
+               else if (fast_rexmit)
+                       tcp_mark_head_lost(sk, 1, 1);
        }
 
        tcp_timeout_skbs(sk);
@@ -2971,7 +2975,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
            before(tp->snd_una, tp->high_seq) &&
            icsk->icsk_ca_state != TCP_CA_Open &&
            tp->fackets_out > tp->reordering) {
-               tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
+               tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
        }
 
index f3c8c6c019ae9790d0d1c79bb695d1cb5ac8ff25..74a6aa003657392d45d97f040ba6a987ee413004 100644 (file)
@@ -367,18 +367,19 @@ void tcp_retransmit_timer(struct sock *sk)
        if (icsk->icsk_retransmits == 0) {
                int mib_idx;
 
-               if (icsk->icsk_ca_state == TCP_CA_Disorder) {
-                       if (tcp_is_sack(tp))
-                               mib_idx = LINUX_MIB_TCPSACKFAILURES;
-                       else
-                               mib_idx = LINUX_MIB_TCPRENOFAILURES;
-               } else if (icsk->icsk_ca_state == TCP_CA_Recovery) {
+               if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
                        else
                                mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
                } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                        mib_idx = LINUX_MIB_TCPLOSSFAILURES;
+               } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
+                          tp->sacked_out) {
+                       if (tcp_is_sack(tp))
+                               mib_idx = LINUX_MIB_TCPSACKFAILURES;
+                       else
+                               mib_idx = LINUX_MIB_TCPRENOFAILURES;
                } else {
                        mib_idx = LINUX_MIB_TCPTIMEOUTS;
                }
index b1108ede18e1c4cec4eea90f9741441add7047fb..d829874d8946e75e6735fd8df4c0ad6facf58bc0 100644 (file)
@@ -34,11 +34,10 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
 {
        struct fib_lookup_arg arg = {
                .lookup_ptr = lookup,
+               .flags = FIB_LOOKUP_NOREF,
        };
 
        fib_rules_lookup(net->ipv6.fib6_rules_ops, fl, flags, &arg);
-       if (arg.rule)
-               fib_rule_put(arg.rule);
 
        if (arg.result)
                return arg.result;
index b6a585909d3560d0f9c0396fe8ed4b136c0ef5f6..de382114609b7de8d0cbf5c5447a18d4f5a89a2d 100644 (file)
@@ -1500,15 +1500,18 @@ static void fib6_gc_timer_cb(unsigned long arg)
 
 static int __net_init fib6_net_init(struct net *net)
 {
+       size_t size = sizeof(struct hlist_head) * FIB6_TABLE_HASHSZ;
+
        setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
 
        net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
        if (!net->ipv6.rt6_stats)
                goto out_timer;
 
-       net->ipv6.fib_table_hash = kcalloc(FIB6_TABLE_HASHSZ,
-                                          sizeof(*net->ipv6.fib_table_hash),
-                                          GFP_KERNEL);
+       /* Avoid false sharing : Use at least a full cache line */
+       size = max_t(size_t, size, L1_CACHE_BYTES);
+
+       net->ipv6.fib_table_hash = kzalloc(size, GFP_KERNEL);
        if (!net->ipv6.fib_table_hash)
                goto out_rt6_stats;
 
index 9c903f9e5079a303b691182d26df542ac8cd5b0f..3e60f2e4e6c2d1e5982b2a56aed64fc38aa49259 100644 (file)
@@ -300,7 +300,6 @@ static int pipe_handler_send_ind(struct sock *sk, u8 utid, u8 msg_id)
 
 static int pipe_handler_enable_pipe(struct sock *sk, int enable)
 {
-       struct pep_sock *pn = pep_sk(sk);
        int utid, req;
 
        if (enable) {
index 2ddc351b3be98f5d36515feba412cf04f7cd2564..8a2e89bffde5ca6677b3562a65b61380a3f7e0a7 100644 (file)
 #include "cluster.h"
 #include "net.h"
 
-u32 tipc_get_addr(void)
-{
-       return tipc_own_addr;
-}
-
 /**
  * tipc_addr_domain_valid - validates a network domain address
  *
index ecfaac10d0b484a59ba1fff5b6c9412915f6dea3..22a60fc98392de04e9b0d1ee4506309aa117cbba 100644 (file)
@@ -121,6 +121,9 @@ static DEFINE_SPINLOCK(bc_lock);
 
 const char tipc_bclink_name[] = "broadcast-link";
 
+static void tipc_nmap_diff(struct tipc_node_map *nm_a,
+                          struct tipc_node_map *nm_b,
+                          struct tipc_node_map *nm_diff);
 
 static u32 buf_seqno(struct sk_buff *buf)
 {
@@ -287,7 +290,7 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
        if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
                return;
 
-       buf = buf_acquire(INT_H_SIZE);
+       buf = tipc_buf_acquire(INT_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
@@ -871,8 +874,9 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
  * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
  */
 
-void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
-                                 struct tipc_node_map *nm_diff)
+static void tipc_nmap_diff(struct tipc_node_map *nm_a,
+                          struct tipc_node_map *nm_b,
+                          struct tipc_node_map *nm_diff)
 {
        int stop = ARRAY_SIZE(nm_a->map);
        int w;
index e8c2b81658c7513c1a49e8bf0c3d57a99348e0ea..011c03f0a4abebb16c104fbeec5386ea8e94022d 100644 (file)
@@ -84,9 +84,6 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_m
        return !memcmp(nm_a, nm_b, sizeof(*nm_a));
 }
 
-void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
-                                 struct tipc_node_map *nm_diff);
-
 void tipc_port_list_add(struct port_list *pl_ptr, u32 port);
 void tipc_port_list_free(struct port_list *pl_ptr);
 
index 9c10c6b7c12baaded7bf6849939e1a42e16cf14f..fd9c06c6828173f42edcdaf3649fb42772c4faef 100644 (file)
@@ -288,9 +288,6 @@ static struct bearer *bearer_find(const char *name)
        struct bearer *b_ptr;
        u32 i;
 
-       if (tipc_mode != TIPC_NET_MODE)
-               return NULL;
-
        for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
                if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
                        return b_ptr;
@@ -630,30 +627,17 @@ int tipc_block_bearer(const char *name)
  * Note: This routine assumes caller holds tipc_net_lock.
  */
 
-static int bearer_disable(const char *name)
+static int bearer_disable(struct bearer *b_ptr)
 {
-       struct bearer *b_ptr;
        struct link *l_ptr;
        struct link *temp_l_ptr;
 
-       b_ptr = bearer_find(name);
-       if (!b_ptr) {
-               warn("Attempt to disable unknown bearer <%s>\n", name);
-               return -EINVAL;
-       }
-
-       info("Disabling bearer <%s>\n", name);
+       info("Disabling bearer <%s>\n", b_ptr->publ.name);
        tipc_disc_stop_link_req(b_ptr->link_req);
        spin_lock_bh(&b_ptr->publ.lock);
        b_ptr->link_req = NULL;
        b_ptr->publ.blocked = 1;
-       if (b_ptr->media->disable_bearer) {
-               spin_unlock_bh(&b_ptr->publ.lock);
-               write_unlock_bh(&tipc_net_lock);
-               b_ptr->media->disable_bearer(&b_ptr->publ);
-               write_lock_bh(&tipc_net_lock);
-               spin_lock_bh(&b_ptr->publ.lock);
-       }
+       b_ptr->media->disable_bearer(&b_ptr->publ);
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                tipc_link_delete(l_ptr);
        }
@@ -664,10 +648,16 @@ static int bearer_disable(const char *name)
 
 int tipc_disable_bearer(const char *name)
 {
+       struct bearer *b_ptr;
        int res;
 
        write_lock_bh(&tipc_net_lock);
-       res = bearer_disable(name);
+       b_ptr = bearer_find(name);
+       if (b_ptr == NULL) {
+               warn("Attempt to disable unknown bearer <%s>\n", name);
+               res = -EINVAL;
+       } else
+               res = bearer_disable(b_ptr);
        write_unlock_bh(&tipc_net_lock);
        return res;
 }
@@ -680,13 +670,7 @@ void tipc_bearer_stop(void)
 
        for (i = 0; i < MAX_BEARERS; i++) {
                if (tipc_bearers[i].active)
-                       tipc_bearers[i].publ.blocked = 1;
-       }
-       for (i = 0; i < MAX_BEARERS; i++) {
-               if (tipc_bearers[i].active)
-                       bearer_disable(tipc_bearers[i].publ.name);
+                       bearer_disable(&tipc_bearers[i]);
        }
        media_count = 0;
 }
-
-
index e68f705381bc345a75283dc69074c737fba26f7a..7fea14b98b9716ba927e0476979cbf9747f9850b 100644 (file)
@@ -113,25 +113,6 @@ void tipc_cltr_delete(struct cluster *c_ptr)
        kfree(c_ptr);
 }
 
-u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr)
-{
-       struct tipc_node *n_ptr;
-       u32 n_num = tipc_node(addr) + 1;
-
-       if (!c_ptr)
-               return addr;
-       for (; n_num <= c_ptr->highest_node; n_num++) {
-               n_ptr = c_ptr->nodes[n_num];
-               if (n_ptr && tipc_node_has_active_links(n_ptr))
-                       return n_ptr->addr;
-       }
-       for (n_num = 1; n_num < tipc_node(addr); n_num++) {
-               n_ptr = c_ptr->nodes[n_num];
-               if (n_ptr && tipc_node_has_active_links(n_ptr))
-                       return n_ptr->addr;
-       }
-       return 0;
-}
 
 void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr)
 {
@@ -232,7 +213,7 @@ struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
 static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
 {
        u32 size = INT_H_SIZE + data_size;
-       struct sk_buff *buf = buf_acquire(size);
+       struct sk_buff *buf = tipc_buf_acquire(size);
        struct tipc_msg *msg;
 
        if (buf) {
index 333efb0b9c44c28c1c479c293e58213de473b821..32636d98c9c6878e2266bfca65dc3ab65b4fce3c 100644 (file)
@@ -75,7 +75,7 @@ void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr);
 void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest);
 void tipc_cltr_broadcast(struct sk_buff *buf);
 int tipc_cltr_init(void);
-u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr);
+
 void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
 void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest);
 void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
index c429b0d488a33644529d291eb794382b77539e18..50a6133a3668f082daf5bb5f6e63a1b67191ab52 100644 (file)
@@ -95,7 +95,7 @@ int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
        return 1;
 }
 
-struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
+static struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
 {
        struct sk_buff *buf;
        __be32 value_net;
@@ -109,6 +109,11 @@ struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
        return buf;
 }
 
+static struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
+{
+       return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
+}
+
 struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
 {
        struct sk_buff *buf;
index 5cd7cc56c54d13ee43a1a5155ff3d0a99dfa5a88..481e12ece715dcc9c02b44928e1cf2b3b2635eb1 100644 (file)
@@ -45,7 +45,6 @@
 struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
 int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
                        void *tlv_data, int tlv_data_size);
-struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value);
 struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
 
 static inline struct sk_buff *tipc_cfg_reply_none(void)
@@ -53,11 +52,6 @@ static inline struct sk_buff *tipc_cfg_reply_none(void)
        return tipc_cfg_reply_alloc(0);
 }
 
-static inline struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
-{
-       return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
-}
-
 static inline struct sk_buff *tipc_cfg_reply_error_string(char *string)
 {
        return tipc_cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
index 466b861dab91acb0fb6fbcab76e24585e755fd48..e2a09eb8efd459957c79475a24e9b5849537f90f 100644 (file)
@@ -96,13 +96,8 @@ int tipc_net_id;
 int tipc_remote_management;
 
 
-int tipc_get_mode(void)
-{
-       return tipc_mode;
-}
-
 /**
- * buf_acquire - creates a TIPC message buffer
+ * tipc_buf_acquire - creates a TIPC message buffer
  * @size: message size (including TIPC header)
  *
  * Returns a new buffer with data pointers set to the specified size.
@@ -111,7 +106,7 @@ int tipc_get_mode(void)
  *       There may also be unrequested tailroom present at the buffer's end.
  */
 
-struct sk_buff *buf_acquire(u32 size)
+struct sk_buff *tipc_buf_acquire(u32 size)
 {
        struct sk_buff *skb;
        unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
@@ -129,7 +124,7 @@ struct sk_buff *buf_acquire(u32 size)
  * tipc_core_stop_net - shut down TIPC networking sub-systems
  */
 
-void tipc_core_stop_net(void)
+static void tipc_core_stop_net(void)
 {
        tipc_eth_media_stop();
        tipc_net_stop();
@@ -154,7 +149,7 @@ int tipc_core_start_net(unsigned long addr)
  * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
  */
 
-void tipc_core_stop(void)
+static void tipc_core_stop(void)
 {
        if (tipc_mode != TIPC_NODE_MODE)
                return;
@@ -176,7 +171,7 @@ void tipc_core_stop(void)
  * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
  */
 
-int tipc_core_start(void)
+static int tipc_core_start(void)
 {
        int res;
 
@@ -246,8 +241,6 @@ MODULE_VERSION(TIPC_MOD_VER);
 
 EXPORT_SYMBOL(tipc_attach);
 EXPORT_SYMBOL(tipc_detach);
-EXPORT_SYMBOL(tipc_get_addr);
-EXPORT_SYMBOL(tipc_get_mode);
 EXPORT_SYMBOL(tipc_createport);
 EXPORT_SYMBOL(tipc_deleteport);
 EXPORT_SYMBOL(tipc_ownidentity);
@@ -262,23 +255,10 @@ EXPORT_SYMBOL(tipc_withdraw);
 EXPORT_SYMBOL(tipc_connect2port);
 EXPORT_SYMBOL(tipc_disconnect);
 EXPORT_SYMBOL(tipc_shutdown);
-EXPORT_SYMBOL(tipc_isconnected);
-EXPORT_SYMBOL(tipc_peer);
-EXPORT_SYMBOL(tipc_ref_valid);
 EXPORT_SYMBOL(tipc_send);
-EXPORT_SYMBOL(tipc_send_buf);
 EXPORT_SYMBOL(tipc_send2name);
-EXPORT_SYMBOL(tipc_forward2name);
-EXPORT_SYMBOL(tipc_send_buf2name);
-EXPORT_SYMBOL(tipc_forward_buf2name);
 EXPORT_SYMBOL(tipc_send2port);
-EXPORT_SYMBOL(tipc_forward2port);
-EXPORT_SYMBOL(tipc_send_buf2port);
-EXPORT_SYMBOL(tipc_forward_buf2port);
 EXPORT_SYMBOL(tipc_multicast);
-/* EXPORT_SYMBOL(tipc_multicast_buf); not available yet */
-EXPORT_SYMBOL(tipc_ispublished);
-EXPORT_SYMBOL(tipc_available_nodes);
 
 /* TIPC API for external bearers (see tipc_bearer.h) */
 
@@ -295,6 +275,4 @@ EXPORT_SYMBOL(tipc_createport_raw);
 EXPORT_SYMBOL(tipc_reject_msg);
 EXPORT_SYMBOL(tipc_send_buf_fast);
 EXPORT_SYMBOL(tipc_acknowledge);
-EXPORT_SYMBOL(tipc_get_port);
-EXPORT_SYMBOL(tipc_get_handle);
 
index 188799017abdb35aa5416862539395dbb412974b..e19389e57227572a5da156393ab90bc7b3573a1c 100644 (file)
@@ -83,9 +83,7 @@
  * Note: TIPC_LOG is configured to echo its output to the system console;
  *       user-defined buffers can be configured to do the same thing.
  */
-
 extern struct print_buf *const TIPC_NULL;
-extern struct print_buf *const TIPC_CONS;
 extern struct print_buf *const TIPC_LOG;
 
 void tipc_printf(struct print_buf *, const char *fmt, ...);
@@ -204,10 +202,7 @@ extern atomic_t tipc_user_count;
  * Routines available to privileged subsystems
  */
 
-extern int  tipc_core_start(void);
-extern void tipc_core_stop(void);
-extern int  tipc_core_start_net(unsigned long addr);
-extern void tipc_core_stop_net(void);
+extern int tipc_core_start_net(unsigned long);
 extern int  tipc_handler_start(void);
 extern void tipc_handler_stop(void);
 extern int  tipc_netlink_start(void);
@@ -328,7 +323,7 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
        return (struct tipc_msg *)skb->data;
 }
 
-extern struct sk_buff *buf_acquire(u32 size);
+extern struct sk_buff *tipc_buf_acquire(u32 size);
 
 /**
  * buf_discard - frees a TIPC message buffer
index 6569d45bfb9a197d0fbdd77701c509651bba862a..46f51d208e5e55193f410ae1c9c512f6144ff791 100644 (file)
@@ -52,7 +52,7 @@ static struct print_buf null_buf = { NULL, 0, NULL, 0 };
 struct print_buf *const TIPC_NULL = &null_buf;
 
 static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
-struct print_buf *const TIPC_CONS = &cons_buf;
+static struct print_buf *const TIPC_CONS = &cons_buf;
 
 static struct print_buf log_buf = { NULL, 0, NULL, 1 };
 struct print_buf *const TIPC_LOG = &log_buf;
@@ -76,6 +76,10 @@ struct print_buf *const TIPC_LOG = &log_buf;
 static char print_string[TIPC_PB_MAX_STR];
 static DEFINE_SPINLOCK(print_lock);
 
+static void tipc_printbuf_reset(struct print_buf *pb);
+static int  tipc_printbuf_empty(struct print_buf *pb);
+static void tipc_printbuf_move(struct print_buf *pb_to,
+                              struct print_buf *pb_from);
 
 #define FORMAT(PTR,LEN,FMT) \
 {\
@@ -116,7 +120,7 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
  * @pb: pointer to print buffer structure
  */
 
-void tipc_printbuf_reset(struct print_buf *pb)
+static void tipc_printbuf_reset(struct print_buf *pb)
 {
        if (pb->buf) {
                pb->crs = pb->buf;
@@ -132,7 +136,7 @@ void tipc_printbuf_reset(struct print_buf *pb)
  * Returns non-zero if print buffer is empty.
  */
 
-int tipc_printbuf_empty(struct print_buf *pb)
+static int tipc_printbuf_empty(struct print_buf *pb)
 {
        return !pb->buf || (pb->crs == pb->buf);
 }
@@ -181,7 +185,8 @@ int tipc_printbuf_validate(struct print_buf *pb)
  * Source print buffer becomes empty if a successful move occurs.
  */
 
-void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
+static void tipc_printbuf_move(struct print_buf *pb_to,
+                              struct print_buf *pb_from)
 {
        int len;
 
index 5ef1bc8f64ef186b5db3dfedbe839fe25ef4c52e..3ba6ba8b434ae38a8815982729057036fa0459dd 100644 (file)
@@ -56,10 +56,7 @@ struct print_buf {
 #define TIPC_PB_MAX_STR 512    /* max printable string (with trailing NUL) */
 
 void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 size);
-void tipc_printbuf_reset(struct print_buf *pb);
-int  tipc_printbuf_empty(struct print_buf *pb);
 int  tipc_printbuf_validate(struct print_buf *pb);
-void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
 
 int tipc_log_resize(int log_size);
 
index dbd79c67d7c00485faee5ede751f1ea062c5e81f..4a7cd3719b781f42739d8c5c524e4a324e09d79b 100644 (file)
@@ -68,20 +68,6 @@ struct link_req {
        unsigned int timer_intv;
 };
 
-
-/*
- * disc_lost_link(): A link has lost contact
- */
-
-void tipc_disc_link_event(u32 addr, char *name, int up)
-{
-       if (in_own_cluster(addr))
-               return;
-       /*
-        * Code for inter cluster link setup here
-        */
-}
-
 /**
  * tipc_disc_init_msg - initialize a link setup message
  * @type: message type (request or response)
@@ -95,7 +81,7 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
                                          u32 dest_domain,
                                          struct bearer *b_ptr)
 {
-       struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
+       struct sk_buff *buf = tipc_buf_acquire(DSC_H_SIZE);
        struct tipc_msg *msg;
 
        if (buf) {
index 9d064c3639bf6ebd31c5b8b6a4039bf8256da7cf..f8e75063612340fa8ea7e3f653dd5e1978ee47f8 100644 (file)
@@ -50,6 +50,4 @@ void tipc_disc_stop_link_req(struct link_req *req);
 
 void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr);
 
-void tipc_disc_link_event(u32 addr, char *name, int up);
-
 #endif
index 4be78ecf4a6711d2f8c4f9e01fd30dc90cd01ec1..b31992ccd5d3a2dc60c3d0e4ca9de186ff9e6682 100644 (file)
@@ -112,6 +112,9 @@ static void link_state_event(struct link *l_ptr, u32 event);
 static void link_reset_statistics(struct link *l_ptr);
 static void link_print(struct link *l_ptr, struct print_buf *buf,
                       const char *str);
+static void link_start(struct link *l_ptr);
+static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
+
 
 /*
  * Debugging code used by link routines only
@@ -442,7 +445,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
 
        k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
        list_add_tail(&l_ptr->link_list, &b_ptr->links);
-       tipc_k_signal((Handler)tipc_link_start, (unsigned long)l_ptr);
+       tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
 
        dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
            l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
@@ -482,9 +485,9 @@ void tipc_link_delete(struct link *l_ptr)
        kfree(l_ptr);
 }
 
-void tipc_link_start(struct link *l_ptr)
+static void link_start(struct link *l_ptr)
 {
-       dbg("tipc_link_start %x\n", l_ptr);
+       dbg("link_start %x\n", l_ptr);
        link_state_event(l_ptr, STARTING_EVT);
 }
 
@@ -1000,7 +1003,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
        /* Fragmentation needed ? */
 
        if (size > max_packet)
-               return tipc_link_send_long_buf(l_ptr, buf);
+               return link_send_long_buf(l_ptr, buf);
 
        /* Packet can be queued or sent: */
 
@@ -1036,7 +1039,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
                /* Try creating a new bundle */
 
                if (size <= max_packet * 2 / 3) {
-                       struct sk_buff *bundler = buf_acquire(max_packet);
+                       struct sk_buff *bundler = tipc_buf_acquire(max_packet);
                        struct tipc_msg bundler_hdr;
 
                        if (bundler) {
@@ -1312,7 +1315,7 @@ again:
 
        /* Prepare header of first fragment: */
 
-       buf_chain = buf = buf_acquire(max_pkt);
+       buf_chain = buf = tipc_buf_acquire(max_pkt);
        if (!buf)
                return -ENOMEM;
        buf->next = NULL;
@@ -1369,7 +1372,7 @@ error:
                        msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
                        msg_set_fragm_no(&fragm_hdr, ++fragm_no);
                        prev = buf;
-                       buf = buf_acquire(fragm_sz + INT_H_SIZE);
+                       buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
                        if (!buf)
                                goto error;
 
@@ -2145,7 +2148,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
        if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
                if (!l_ptr->proto_msg_queue) {
                        l_ptr->proto_msg_queue =
-                               buf_acquire(sizeof(l_ptr->proto_msg));
+                               tipc_buf_acquire(sizeof(l_ptr->proto_msg));
                }
                buf = l_ptr->proto_msg_queue;
                if (!buf)
@@ -2159,7 +2162,7 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
 
        msg_dbg(msg, ">>");
 
-       buf = buf_acquire(msg_size);
+       buf = tipc_buf_acquire(msg_size);
        if (!buf)
                return;
 
@@ -2318,10 +2321,10 @@ exit:
  * tipc_link_tunnel(): Send one message via a link belonging to
  * another bearer. Owner node is locked.
  */
-void tipc_link_tunnel(struct link *l_ptr,
-                     struct tipc_msg *tunnel_hdr,
-                     struct tipc_msg  *msg,
-                     u32 selector)
+static void tipc_link_tunnel(struct link *l_ptr,
+                            struct tipc_msg *tunnel_hdr,
+                            struct tipc_msg  *msg,
+                            u32 selector)
 {
        struct link *tunnel;
        struct sk_buff *buf;
@@ -2334,7 +2337,7 @@ void tipc_link_tunnel(struct link *l_ptr,
                return;
        }
        msg_set_size(tunnel_hdr, length + INT_H_SIZE);
-       buf = buf_acquire(length + INT_H_SIZE);
+       buf = tipc_buf_acquire(length + INT_H_SIZE);
        if (!buf) {
                warn("Link changeover error, "
                     "unable to send tunnel msg\n");
@@ -2380,7 +2383,7 @@ void tipc_link_changeover(struct link *l_ptr)
        if (!l_ptr->first_out) {
                struct sk_buff *buf;
 
-               buf = buf_acquire(INT_H_SIZE);
+               buf = tipc_buf_acquire(INT_H_SIZE);
                if (buf) {
                        skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
                        msg_set_size(&tunnel_hdr, INT_H_SIZE);
@@ -2441,7 +2444,7 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
                msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
-               outbuf = buf_acquire(length + INT_H_SIZE);
+               outbuf = tipc_buf_acquire(length + INT_H_SIZE);
                if (outbuf == NULL) {
                        warn("Link changeover error, "
                             "unable to send duplicate msg\n");
@@ -2477,7 +2480,7 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
        u32 size = msg_size(msg);
        struct sk_buff *eb;
 
-       eb = buf_acquire(size);
+       eb = tipc_buf_acquire(size);
        if (eb)
                skb_copy_to_linear_data(eb, msg, size);
        return eb;
@@ -2605,11 +2608,11 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
 
 
 /*
- * tipc_link_send_long_buf: Entry for buffers needing fragmentation.
+ * link_send_long_buf: Entry for buffers needing fragmentation.
  * The buffer is complete, inclusive total message length.
  * Returns user data length.
  */
-int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
 {
        struct tipc_msg *inmsg = buf_msg(buf);
        struct tipc_msg fragm_hdr;
@@ -2648,7 +2651,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
                        fragm_sz = rest;
                        msg_set_type(&fragm_hdr, LAST_FRAGMENT);
                }
-               fragm = buf_acquire(fragm_sz + INT_H_SIZE);
+               fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
                if (fragm == NULL) {
                        warn("Link unable to fragment message\n");
                        dsz = -ENOMEM;
@@ -2753,7 +2756,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
                        buf_discard(fbuf);
                        return 0;
                }
-               pbuf = buf_acquire(msg_size(imsg));
+               pbuf = tipc_buf_acquire(msg_size(imsg));
                if (pbuf != NULL) {
                        pbuf->next = *pending;
                        *pending = pbuf;
index 4e944ef4a5402d0e78a0b9596f6d056bbef1891f..f98bc613de67b247b0369df611044bf75604e5cd 100644 (file)
@@ -225,7 +225,6 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
 void tipc_link_reset_fragments(struct link *l_ptr);
 int tipc_link_is_up(struct link *l_ptr);
 int tipc_link_is_active(struct link *l_ptr);
-void tipc_link_start(struct link *l_ptr);
 u32 tipc_link_push_packet(struct link *l_ptr);
 void tipc_link_stop(struct link *l_ptr);
 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
@@ -239,9 +238,6 @@ int tipc_link_send_sections_fast(struct port* sender,
                                 struct iovec const *msg_sect,
                                 const u32 num_sect,
                                 u32 destnode);
-int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
-void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
-                     struct tipc_msg *msg, u32 selector);
 void tipc_link_recv_bundle(struct sk_buff *buf);
 int  tipc_link_recv_fragment(struct sk_buff **pending,
                             struct sk_buff **fb,
index 381063817b41b393069c54945e092540610f047e..ecb532fb03512f69720f1c47c6ce9a03b0e7264f 100644 (file)
@@ -112,7 +112,7 @@ int tipc_msg_build(struct tipc_msg *hdr,
                return dsz;
        }
 
-       *buf = buf_acquire(sz);
+       *buf = tipc_buf_acquire(sz);
        if (!(*buf))
                return -ENOMEM;
        skb_copy_to_linear_data(*buf, hdr, hsz);
index 6ac3c543250bfebc1f7f82d4ea7b516393cbf974..7b907171f879ecf7ab2c1a33c0ce6cf2e1f5fca4 100644 (file)
@@ -98,7 +98,7 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
 
 static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
 {
-       struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);
+       struct sk_buff *buf = tipc_buf_acquire(LONG_H_SIZE + size);
        struct tipc_msg *msg;
 
        if (buf != NULL) {
index 823e9abb7ef5af39d6194b682f3d3ffc4bfb6f3c..b4d87eb2dc5d871352d6d1f54ea73df830d83779 100644 (file)
@@ -50,7 +50,8 @@ void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-struct tipc_node *tipc_nodes = NULL;   /* sorted list of nodes within cluster */
+/* sorted list of nodes within cluster */
+static struct tipc_node *tipc_nodes = NULL;
 
 static DEFINE_SPINLOCK(node_create_lock);
 
@@ -587,22 +588,6 @@ void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router)
                node_lost_contact(n_ptr);
 }
 
-u32 tipc_available_nodes(const u32 domain)
-{
-       struct tipc_node *n_ptr;
-       u32 cnt = 0;
-
-       read_lock_bh(&tipc_net_lock);
-       for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
-               if (!tipc_in_scope(domain, n_ptr->addr))
-                       continue;
-               if (tipc_node_is_up(n_ptr))
-                       cnt++;
-       }
-       read_unlock_bh(&tipc_net_lock);
-       return cnt;
-}
-
 struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 {
        u32 domain;
index 45f3db3a595d0d9b7fd7f5cb14a84a5df73613e3..fff331b2d26c46103b64b5a986444af3c579de08 100644 (file)
@@ -96,7 +96,6 @@ struct tipc_node {
        } bclink;
 };
 
-extern struct tipc_node *tipc_nodes;
 extern u32 tipc_own_tag;
 
 struct tipc_node *tipc_node_create(u32 addr);
index 5c4285b2d5552c0d04a757ba548139d2ee3f57c7..82092eaa15366f4262689bd58e37b9a2d91c60b8 100644 (file)
@@ -293,34 +293,6 @@ int tipc_deleteport(u32 ref)
        return 0;
 }
 
-/**
- * tipc_get_port() - return port associated with 'ref'
- *
- * Note: Port is not locked.
- */
-
-struct tipc_port *tipc_get_port(const u32 ref)
-{
-       return (struct tipc_port *)tipc_ref_deref(ref);
-}
-
-/**
- * tipc_get_handle - return user handle associated to port 'ref'
- */
-
-void *tipc_get_handle(const u32 ref)
-{
-       struct port *p_ptr;
-       void * handle;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return NULL;
-       handle = p_ptr->publ.usr_handle;
-       tipc_port_unlock(p_ptr);
-       return handle;
-}
-
 static int port_unreliable(struct port *p_ptr)
 {
        return msg_src_droppable(&p_ptr->publ.phdr);
@@ -392,7 +364,7 @@ static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
        struct sk_buff *buf;
        struct tipc_msg *msg;
 
-       buf = buf_acquire(LONG_H_SIZE);
+       buf = tipc_buf_acquire(LONG_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                tipc_msg_init(msg, usr, type, LONG_H_SIZE, destnode);
@@ -433,7 +405,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
                hdr_sz = MCAST_H_SIZE;
        else
                hdr_sz = LONG_H_SIZE;
-       rbuf = buf_acquire(data_sz + hdr_sz);
+       rbuf = tipc_buf_acquire(data_sz + hdr_sz);
        if (rbuf == NULL) {
                buf_discard(buf);
                return data_sz;
@@ -1242,50 +1214,13 @@ int tipc_shutdown(u32 ref)
        return tipc_disconnect(ref);
 }
 
-int tipc_isconnected(u32 ref, int *isconnected)
-{
-       struct port *p_ptr;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
-       *isconnected = p_ptr->publ.connected;
-       tipc_port_unlock(p_ptr);
-       return 0;
-}
-
-int tipc_peer(u32 ref, struct tipc_portid *peer)
-{
-       struct port *p_ptr;
-       int res;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
-       if (p_ptr->publ.connected) {
-               peer->ref = port_peerport(p_ptr);
-               peer->node = port_peernode(p_ptr);
-               res = 0;
-       } else
-               res = -ENOTCONN;
-       tipc_port_unlock(p_ptr);
-       return res;
-}
-
-int tipc_ref_valid(u32 ref)
-{
-       /* Works irrespective of type */
-       return !!tipc_ref_deref(ref);
-}
-
-
 /*
  *  tipc_port_recv_sections(): Concatenate and deliver sectioned
  *                        message for this node.
  */
 
-int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
-                      struct iovec const *msg_sect)
+static int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
+                                  struct iovec const *msg_sect)
 {
        struct sk_buff *buf;
        int res;
@@ -1335,66 +1270,17 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
        return -ELINKCONG;
 }
 
-/**
- * tipc_send_buf - send message buffer on connection
- */
-
-int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
-{
-       struct port *p_ptr;
-       struct tipc_msg *msg;
-       u32 destnode;
-       u32 hsz;
-       u32 sz;
-       u32 res;
-
-       p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || !p_ptr->publ.connected)
-               return -EINVAL;
-
-       msg = &p_ptr->publ.phdr;
-       hsz = msg_hdr_sz(msg);
-       sz = hsz + dsz;
-       msg_set_size(msg, sz);
-       if (skb_cow(buf, hsz))
-               return -ENOMEM;
-
-       skb_push(buf, hsz);
-       skb_copy_to_linear_data(buf, msg, hsz);
-       destnode = msg_destnode(msg);
-       p_ptr->publ.congested = 1;
-       if (!tipc_port_congested(p_ptr)) {
-               if (likely(destnode != tipc_own_addr))
-                       res = tipc_send_buf_fast(buf, destnode);
-               else {
-                       tipc_port_recv_msg(buf);
-                       res = sz;
-               }
-               if (likely(res != -ELINKCONG)) {
-                       port_incr_out_seqno(p_ptr);
-                       p_ptr->sent++;
-                       p_ptr->publ.congested = 0;
-                       return res;
-               }
-       }
-       if (port_unreliable(p_ptr)) {
-               p_ptr->publ.congested = 0;
-               return dsz;
-       }
-       return -ELINKCONG;
-}
-
 /**
  * tipc_forward2name - forward message sections to port name
  */
 
-int tipc_forward2name(u32 ref,
-                     struct tipc_name const *name,
-                     u32 domain,
-                     u32 num_sect,
-                     struct iovec const *msg_sect,
-                     struct tipc_portid const *orig,
-                     unsigned int importance)
+static int tipc_forward2name(u32 ref,
+                            struct tipc_name const *name,
+                            u32 domain,
+                            u32 num_sect,
+                            struct iovec const *msg_sect,
+                            struct tipc_portid const *orig,
+                            unsigned int importance)
 {
        struct port *p_ptr;
        struct tipc_msg *msg;
@@ -1456,90 +1342,16 @@ int tipc_send2name(u32 ref,
                                 TIPC_PORT_IMPORTANCE);
 }
 
-/**
- * tipc_forward_buf2name - forward message buffer to port name
- */
-
-int tipc_forward_buf2name(u32 ref,
-                         struct tipc_name const *name,
-                         u32 domain,
-                         struct sk_buff *buf,
-                         unsigned int dsz,
-                         struct tipc_portid const *orig,
-                         unsigned int importance)
-{
-       struct port *p_ptr;
-       struct tipc_msg *msg;
-       u32 destnode = domain;
-       u32 destport;
-       int res;
-
-       p_ptr = (struct port *)tipc_ref_deref(ref);
-       if (!p_ptr || p_ptr->publ.connected)
-               return -EINVAL;
-
-       msg = &p_ptr->publ.phdr;
-       if (importance <= TIPC_CRITICAL_IMPORTANCE)
-               msg_set_importance(msg, importance);
-       msg_set_type(msg, TIPC_NAMED_MSG);
-       msg_set_orignode(msg, orig->node);
-       msg_set_origport(msg, orig->ref);
-       msg_set_nametype(msg, name->type);
-       msg_set_nameinst(msg, name->instance);
-       msg_set_lookup_scope(msg, tipc_addr_scope(domain));
-       msg_set_hdr_sz(msg, LONG_H_SIZE);
-       msg_set_size(msg, LONG_H_SIZE + dsz);
-       destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
-       msg_set_destnode(msg, destnode);
-       msg_set_destport(msg, destport);
-       msg_dbg(msg, "forw2name ==> ");
-       if (skb_cow(buf, LONG_H_SIZE))
-               return -ENOMEM;
-       skb_push(buf, LONG_H_SIZE);
-       skb_copy_to_linear_data(buf, msg, LONG_H_SIZE);
-       msg_dbg(buf_msg(buf),"PREP:");
-       if (likely(destport)) {
-               p_ptr->sent++;
-               if (destnode == tipc_own_addr)
-                       return tipc_port_recv_msg(buf);
-               res = tipc_send_buf_fast(buf, destnode);
-               if (likely(res != -ELINKCONG))
-                       return res;
-               if (port_unreliable(p_ptr))
-                       return dsz;
-               return -ELINKCONG;
-       }
-       return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
-}
-
-/**
- * tipc_send_buf2name - send message buffer to port name
- */
-
-int tipc_send_buf2name(u32 ref,
-                      struct tipc_name const *dest,
-                      u32 domain,
-                      struct sk_buff *buf,
-                      unsigned int dsz)
-{
-       struct tipc_portid orig;
-
-       orig.ref = ref;
-       orig.node = tipc_own_addr;
-       return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
-                                    TIPC_PORT_IMPORTANCE);
-}
-
 /**
  * tipc_forward2port - forward message sections to port identity
  */
 
-int tipc_forward2port(u32 ref,
-                     struct tipc_portid const *dest,
-                     unsigned int num_sect,
-                     struct iovec const *msg_sect,
-                     struct tipc_portid const *orig,
-                     unsigned int importance)
+static int tipc_forward2port(u32 ref,
+                            struct tipc_portid const *dest,
+                            unsigned int num_sect,
+                            struct iovec const *msg_sect,
+                            struct tipc_portid const *orig,
+                            unsigned int importance)
 {
        struct port *p_ptr;
        struct tipc_msg *msg;
@@ -1591,12 +1403,12 @@ int tipc_send2port(u32 ref,
 /**
  * tipc_forward_buf2port - forward message buffer to port identity
  */
-int tipc_forward_buf2port(u32 ref,
-                         struct tipc_portid const *dest,
-                         struct sk_buff *buf,
-                         unsigned int dsz,
-                         struct tipc_portid const *orig,
-                         unsigned int importance)
+static int tipc_forward_buf2port(u32 ref,
+                                struct tipc_portid const *dest,
+                                struct sk_buff *buf,
+                                unsigned int dsz,
+                                struct tipc_portid const *orig,
+                                unsigned int importance)
 {
        struct port *p_ptr;
        struct tipc_msg *msg;
index e74bd9563739b79ff68c73668eb67952e3e2c04e..73bbf442b346750791965913bed1b146a07e68e0 100644 (file)
@@ -109,8 +109,6 @@ struct port {
 extern spinlock_t tipc_port_list_lock;
 struct port_list;
 
-int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect,
-                           struct iovec const *msg_sect);
 int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
                              struct iovec const *msg_sect, u32 num_sect,
                              int err);
index 8dea66500cf5ce65d6427aad77152f437e0c92ac..ab8ad32d8c202260d283c555b3b435b71dc0ab04 100644 (file)
@@ -282,23 +282,6 @@ void *tipc_ref_lock(u32 ref)
        return NULL;
 }
 
-/**
- * tipc_ref_unlock - unlock referenced object
- */
-
-void tipc_ref_unlock(u32 ref)
-{
-       if (likely(tipc_ref_table.entries)) {
-               struct reference *entry;
-
-               entry = &tipc_ref_table.entries[ref &
-                                               tipc_ref_table.index_mask];
-               if (likely((entry->ref == ref) && (entry->object)))
-                       spin_unlock_bh(&entry->lock);
-               else
-                       err("Attempt to unlock non-existent reference\n");
-       }
-}
 
 /**
  * tipc_ref_deref - return pointer referenced object (without locking it)
index 7e3798ea93b9bd437787f2051167f4425432bc6d..5bc8e7ab84de8192ca71c97a0ae415474f8e266c 100644 (file)
@@ -44,7 +44,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock);
 void tipc_ref_discard(u32 ref);
 
 void *tipc_ref_lock(u32 ref);
-void tipc_ref_unlock(u32 ref);
 void *tipc_ref_deref(u32 ref);
 
 #endif
index 1a5b9a6bd128f177bb267089b6773ae9990feeca..18813acc6bef0d431f11e18290a9722c489ef2c6 100644 (file)
@@ -598,12 +598,3 @@ void tipc_subscr_stop(void)
                topsrv.user_ref = 0;
        }
 }
-
-
-int tipc_ispublished(struct tipc_name const *name)
-{
-       u32 domain = 0;
-
-       return tipc_nametbl_translate(name->type, name->instance, &domain) != 0;
-}
-
index 2c01ba2d86bf57f1667897b0993fe9ece6053534..83f8b5e91fc8e45e7f0181dc36f1d5a623e0ae4b 100644 (file)
@@ -160,14 +160,3 @@ u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
        }
        return 0;
 }
-
-
-u32 tipc_zone_next_node(u32 addr)
-{
-       struct cluster *c_ptr = tipc_cltr_find(addr);
-
-       if (c_ptr)
-               return tipc_cltr_next_node(c_ptr, addr);
-       return 0;
-}
-
index 7bdc3406ba9b0a1841f34c21411c72824e327fe0..bd1c20ce9d0656212d653a7bfab17dbf775a458c 100644 (file)
@@ -61,7 +61,6 @@ void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest);
 struct _zone *tipc_zone_create(u32 addr);
 void tipc_zone_delete(struct _zone *z_ptr);
 void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
-u32 tipc_zone_next_node(u32 addr);
 
 static inline struct _zone *tipc_zone_find(u32 addr)
 {