bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville...
authorDavid S. Miller <davem@davemloft.net>
Wed, 22 Sep 2010 01:17:19 +0000 (18:17 -0700)
committerDavid S. Miller <davem@davemloft.net>
Wed, 22 Sep 2010 01:17:19 +0000 (18:17 -0700)
43 files changed:
drivers/atm/horizon.c
drivers/net/pppoe.c
drivers/net/pppox.c
drivers/net/pptp.c
drivers/net/qlcnic/qlcnic_init.c
drivers/net/sfc/Makefile
drivers/net/sfc/efx.c
drivers/net/sfc/efx.h
drivers/net/sfc/ethtool.c
drivers/net/sfc/falcon.c
drivers/net/sfc/filter.c [new file with mode: 0644]
drivers/net/sfc/filter.h [new file with mode: 0644]
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/sfc/regs.h
drivers/net/sfc/siena.c
drivers/net/sundance.c
drivers/net/tokenring/tmspci.c
include/linux/dccp.h
include/linux/ethtool.h
include/linux/if_pppox.h
include/net/addrconf.h
net/8021q/vlan.h
net/8021q/vlan_dev.c
net/caif/caif_dev.c
net/caif/caif_socket.c
net/caif/cfcnfg.c
net/caif/chnl_net.c
net/core/ethtool.c
net/core/pktgen.c
net/core/utils.c
net/dccp/ccid.h
net/dccp/ccids/ccid3.c
net/dccp/ccids/ccid3.h
net/dccp/ccids/lib/tfrc.h
net/dccp/ccids/lib/tfrc_equation.c
net/dccp/options.c
net/ipv4/ip_gre.c
net/ipv4/ipip.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/sit.c
net/l2tp/l2tp_ppp.c

index 54720baa7363996203a9489a0bfae45e0fa4a7da..a95790452a685f3325ed64af5db2f2596f911236 100644 (file)
@@ -1645,10 +1645,8 @@ static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
     unsigned short d = 0;
     char * s = skb->data;
     if (*s++ == 'D') {
-      for (i = 0; i < 4; ++i) {
-       d = (d<<4) | ((*s <= '9') ? (*s - '0') : (*s - 'a' + 10));
-       ++s;
-      }
+       for (i = 0; i < 4; ++i)
+               d = (d << 4) | hex_to_bin(*s++);
       PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
     }
   }
index c07de359dc074e7abd789826f25728ab065350b3..d72fb0519a2aa674a8757f5146b0df19655d9749 100644 (file)
@@ -1124,7 +1124,7 @@ static const struct proto_ops pppoe_ops = {
        .ioctl          = pppox_ioctl,
 };
 
-static struct pppox_proto pppoe_proto = {
+static const struct pppox_proto pppoe_proto = {
        .create = pppoe_create,
        .ioctl  = pppoe_ioctl,
        .owner  = THIS_MODULE,
index d4191ef9cad14fed8d1cd6c0d284ddb7e51c8163..8c0d170dabcd5ab32685e6e4e0df52533d9f6de5 100644 (file)
@@ -36,9 +36,9 @@
 
 #include <asm/uaccess.h>
 
-static struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
+static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
 
-int register_pppox_proto(int proto_num, struct pppox_proto *pp)
+int register_pppox_proto(int proto_num, const struct pppox_proto *pp)
 {
        if (proto_num < 0 || proto_num > PX_MAX_PROTO)
                return -EINVAL;
index 761f0eced724642cbfa33f0441ca12ea80eef222..ccbc91326bfad5b04cdb279be57b74fd3c0d6247 100644 (file)
@@ -53,7 +53,7 @@ static struct pppox_sock **callid_sock;
 static DEFINE_SPINLOCK(chan_lock);
 
 static struct proto pptp_sk_proto __read_mostly;
-static struct ppp_channel_ops pptp_chan_ops;
+static const struct ppp_channel_ops pptp_chan_ops;
 static const struct proto_ops pptp_ops;
 
 #define PPP_LCP_ECHOREQ 0x09
@@ -628,7 +628,7 @@ static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
        return err;
 }
 
-static struct ppp_channel_ops pptp_chan_ops = {
+static const struct ppp_channel_ops pptp_chan_ops = {
        .start_xmit = pptp_xmit,
        .ioctl      = pptp_ppp_ioctl,
 };
@@ -659,12 +659,12 @@ static const struct proto_ops pptp_ops = {
        .ioctl      = pppox_ioctl,
 };
 
-static struct pppox_proto pppox_pptp_proto = {
+static const struct pppox_proto pppox_pptp_proto = {
        .create = pptp_create,
        .owner  = THIS_MODULE,
 };
 
-static struct gre_protocol gre_pptp_protocol = {
+static const struct gre_protocol gre_pptp_protocol = {
        .handler = pptp_rcv,
 };
 
index e26fa9593311cf8c6dfcf03884eb2c02390b070f..16dd9ebd36c949404921aadd0976d9b0df9788a3 100644 (file)
@@ -1418,8 +1418,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff);
-
        if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
                adapter->stats.rxdropped++;
                dev_kfree_skb(skb);
@@ -1491,8 +1489,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
 
        skb_put(skb, lro_length + data_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
-
        skb_pull(skb, l2_hdr_offset);
 
        if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
@@ -1732,8 +1728,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
        if (pkt_offset)
                skb_pull(skb, pkt_offset);
 
-       skb->truesize = skb->len + sizeof(struct sk_buff);
-
        if (!qlcnic_check_loopback_buff(skb->data))
                adapter->diag_cnt++;
 
index 1047b19c60a590f1bb67f4315950c707a5623d6d..fd9272b5873a3260c9644bc6446097ab2f13b481 100644 (file)
@@ -1,4 +1,4 @@
-sfc-y                  += efx.o nic.o falcon.o siena.o tx.o rx.o \
+sfc-y                  += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
                           falcon_gmac.o falcon_xmac.o mcdi_mac.o \
                           selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
                           tenxpress.o falcon_boards.o mcdi.o mcdi_phy.o
index f702f1fb63b6c85f10d2c27f980641857765f566..5be71f49a205a773164a36f9bab8f42804d84228 100644 (file)
@@ -124,8 +124,9 @@ MODULE_PARM_DESC(separate_tx_channels,
 static int napi_weight = 64;
 
 /* This is the time (in jiffies) between invocations of the hardware
- * monitor, which checks for known hardware bugs and resets the
- * hardware and driver as necessary.
+ * monitor.  On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
  */
 unsigned int efx_monitor_interval = 1 * HZ;
 
@@ -1357,8 +1358,17 @@ static int efx_probe_all(struct efx_nic *efx)
        if (rc)
                goto fail3;
 
+       rc = efx_probe_filters(efx);
+       if (rc) {
+               netif_err(efx, probe, efx->net_dev,
+                         "failed to create filter tables\n");
+               goto fail4;
+       }
+
        return 0;
 
+ fail4:
+       efx_remove_channels(efx);
  fail3:
        efx_remove_port(efx);
  fail2:
@@ -1489,6 +1499,7 @@ static void efx_stop_all(struct efx_nic *efx)
 
 static void efx_remove_all(struct efx_nic *efx)
 {
+       efx_remove_filters(efx);
        efx_remove_channels(efx);
        efx_remove_port(efx);
        efx_remove_nic(efx);
@@ -1535,8 +1546,7 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
  *
  **************************************************************************/
 
-/* Run periodically off the general workqueue. Serialised against
- * efx_reconfigure_port via the mac_lock */
+/* Run periodically off the general workqueue */
 static void efx_monitor(struct work_struct *data)
 {
        struct efx_nic *efx = container_of(data, struct efx_nic,
@@ -1549,16 +1559,13 @@ static void efx_monitor(struct work_struct *data)
 
        /* If the mac_lock is already held then it is likely a port
         * reconfiguration is already in place, which will likely do
-        * most of the work of check_hw() anyway. */
-       if (!mutex_trylock(&efx->mac_lock))
-               goto out_requeue;
-       if (!efx->port_enabled)
-               goto out_unlock;
-       efx->type->monitor(efx);
+        * most of the work of monitor() anyway. */
+       if (mutex_trylock(&efx->mac_lock)) {
+               if (efx->port_enabled)
+                       efx->type->monitor(efx);
+               mutex_unlock(&efx->mac_lock);
+       }
 
-out_unlock:
-       mutex_unlock(&efx->mac_lock);
-out_requeue:
        queue_delayed_work(efx->workqueue, &efx->monitor_work,
                           efx_monitor_interval);
 }
@@ -2002,6 +2009,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
        efx->mac_op->reconfigure(efx);
 
        efx_init_channels(efx);
+       efx_restore_filters(efx);
 
        mutex_unlock(&efx->spi_lock);
        mutex_unlock(&efx->mac_lock);
index e783c0fedfd8c7f045b264c62783694b99492d3b..f502b14eb22c8c98685bd25b68d08e05a16a3c65 100644 (file)
@@ -12,6 +12,7 @@
 #define EFX_EFX_H
 
 #include "net_driver.h"
+#include "filter.h"
 
 /* PCI IDs */
 #define EFX_VENDID_SFC         0x1924
@@ -64,6 +65,19 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
  * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
 #define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
 
+/* Filters */
+extern int efx_probe_filters(struct efx_nic *efx);
+extern void efx_restore_filters(struct efx_nic *efx);
+extern void efx_remove_filters(struct efx_nic *efx);
+extern int efx_filter_insert_filter(struct efx_nic *efx,
+                                   struct efx_filter_spec *spec,
+                                   bool replace);
+extern int efx_filter_remove_filter(struct efx_nic *efx,
+                                   struct efx_filter_spec *spec);
+extern void efx_filter_table_clear(struct efx_nic *efx,
+                                  enum efx_filter_table_id table_id,
+                                  enum efx_filter_priority priority);
+
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
 extern int
index 7f735d804801eea9087c4b4a4336dbdeb6c0334f..c95328fa3ee84ae2a0d37b9ed69e9bb48c6df1e5 100644 (file)
@@ -15,6 +15,7 @@
 #include "workarounds.h"
 #include "selftest.h"
 #include "efx.h"
+#include "filter.h"
 #include "nic.h"
 #include "spi.h"
 #include "mdio_10g.h"
@@ -551,9 +552,22 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
 static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH;
+       u32 supported = (efx->type->offload_features &
+                        (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE));
+       int rc;
+
+       rc = ethtool_op_set_flags(net_dev, data, supported);
+       if (rc)
+               return rc;
+
+       if (!(data & ETH_FLAG_NTUPLE)) {
+               efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP,
+                                      EFX_FILTER_PRI_MANUAL);
+               efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
+                                      EFX_FILTER_PRI_MANUAL);
+       }
 
-       return ethtool_op_set_flags(net_dev, data, supported);
+       return 0;
 }
 
 static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -955,6 +969,105 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
        }
 }
 
+static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
+                                    struct ethtool_rx_ntuple *ntuple)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
+       struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
+       struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
+       struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
+       struct efx_filter_spec filter;
+
+       /* Range-check action */
+       if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
+           ntuple->fs.action >= (s32)efx->n_rx_channels)
+               return -EINVAL;
+
+       if (~ntuple->fs.data_mask)
+               return -EINVAL;
+
+       switch (ntuple->fs.flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+               /* Must match all of destination, */
+               if (ip_mask->ip4dst | ip_mask->pdst)
+                       return -EINVAL;
+               /* all or none of source, */
+               if ((ip_mask->ip4src | ip_mask->psrc) &&
+                   ((__force u32)~ip_mask->ip4src |
+                    (__force u16)~ip_mask->psrc))
+                       return -EINVAL;
+               /* and nothing else */
+               if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+                       return -EINVAL;
+               break;
+       case ETHER_FLOW:
+               /* Must match all of destination, */
+               if (!is_zero_ether_addr(mac_mask->h_dest))
+                       return -EINVAL;
+               /* all or none of VID, */
+               if (ntuple->fs.vlan_tag_mask != 0xf000 &&
+                   ntuple->fs.vlan_tag_mask != 0xffff)
+                       return -EINVAL;
+               /* and nothing else */
+               if (!is_broadcast_ether_addr(mac_mask->h_source) ||
+                   mac_mask->h_proto != htons(0xffff))
+                       return -EINVAL;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       filter.priority = EFX_FILTER_PRI_MANUAL;
+       filter.flags = 0;
+
+       switch (ntuple->fs.flow_type) {
+       case TCP_V4_FLOW:
+               if (!ip_mask->ip4src)
+                       efx_filter_set_rx_tcp_full(&filter,
+                                                  htonl(ip_entry->ip4src),
+                                                  htons(ip_entry->psrc),
+                                                  htonl(ip_entry->ip4dst),
+                                                  htons(ip_entry->pdst));
+               else
+                       efx_filter_set_rx_tcp_wild(&filter,
+                                                  htonl(ip_entry->ip4dst),
+                                                  htons(ip_entry->pdst));
+               break;
+       case UDP_V4_FLOW:
+               if (!ip_mask->ip4src)
+                       efx_filter_set_rx_udp_full(&filter,
+                                                  htonl(ip_entry->ip4src),
+                                                  htons(ip_entry->psrc),
+                                                  htonl(ip_entry->ip4dst),
+                                                  htons(ip_entry->pdst));
+               else
+                       efx_filter_set_rx_udp_wild(&filter,
+                                                  htonl(ip_entry->ip4dst),
+                                                  htons(ip_entry->pdst));
+               break;
+       case ETHER_FLOW:
+               if (ntuple->fs.vlan_tag_mask == 0xf000)
+                       efx_filter_set_rx_mac_full(&filter,
+                                                  ntuple->fs.vlan_tag & 0xfff,
+                                                  mac_entry->h_dest);
+               else
+                       efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
+               break;
+       }
+
+       if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
+               return efx_filter_remove_filter(efx, &filter);
+       } else {
+               if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+                       filter.dmaq_id = 0xfff;
+               else
+                       filter.dmaq_id = ntuple->fs.action;
+               return efx_filter_insert_filter(efx, &filter, true);
+       }
+}
+
 static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
                                      struct ethtool_rxfh_indir *indir)
 {
@@ -1033,6 +1146,7 @@ const struct ethtool_ops efx_ethtool_ops = {
        .set_wol                = efx_ethtool_set_wol,
        .reset                  = efx_ethtool_reset,
        .get_rxnfc              = efx_ethtool_get_rxnfc,
+       .set_rx_ntuple          = efx_ethtool_set_rx_ntuple,
        .get_rxfh_indir         = efx_ethtool_get_rxfh_indir,
        .set_rxfh_indir         = efx_ethtool_set_rxfh_indir,
 };
index b4d8efe67772ff1f6e3f605a74f6ae05dff7cecb..b398a4198042154509fec5501e0595a3fc8d7212 100644 (file)
@@ -1874,7 +1874,7 @@ struct efx_nic_type falcon_b0_nic_type = {
                                   * channels */
        .tx_dc_base = 0x130000,
        .rx_dc_base = 0x100000,
-       .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH,
+       .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
        .reset_world_flags = ETH_RESET_IRQ,
 };
 
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
new file mode 100644 (file)
index 0000000..abc884d
--- /dev/null
@@ -0,0 +1,445 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "efx.h"
+#include "filter.h"
+#include "io.h"
+#include "nic.h"
+#include "regs.h"
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+struct efx_filter_table {
+       u32             offset;         /* address of table relative to BAR */
+       unsigned        size;           /* number of entries */
+       unsigned        step;           /* step between entries */
+       unsigned        used;           /* number currently used */
+       unsigned long   *used_bitmap;
+       struct efx_filter_spec *spec;
+};
+
+struct efx_filter_state {
+       spinlock_t      lock;
+       struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
+       unsigned        search_depth[EFX_FILTER_TYPE_COUNT];
+};
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
+static u16 efx_filter_hash(u32 key)
+{
+       u16 tmp;
+
+       /* First 16 rounds */
+       tmp = 0x1fff ^ key >> 16;
+       tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+       tmp = tmp ^ tmp >> 9;
+       /* Last 16 rounds */
+       tmp = tmp ^ tmp << 13 ^ key;
+       tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+       return tmp ^ tmp >> 9;
+}
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static u16 efx_filter_increment(u32 key)
+{
+       return key * 2 - 1;
+}
+
+static enum efx_filter_table_id
+efx_filter_type_table_id(enum efx_filter_type type)
+{
+       BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2));
+       BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2));
+       BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2));
+       BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2));
+       BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
+       BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
+       return type >> 2;
+}
+
+static void
+efx_filter_table_reset_search_depth(struct efx_filter_state *state,
+                                   enum efx_filter_table_id table_id)
+{
+       memset(state->search_depth + (table_id << 2), 0,
+              sizeof(state->search_depth[0]) << 2);
+}
+
+static void efx_filter_push_rx_limits(struct efx_nic *efx)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       efx_oword_t filter_ctl;
+
+       efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+       EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+                           state->search_depth[EFX_FILTER_RX_TCP_FULL] +
+                           FILTER_CTL_SRCH_FUDGE_FULL);
+       EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+                           state->search_depth[EFX_FILTER_RX_TCP_WILD] +
+                           FILTER_CTL_SRCH_FUDGE_WILD);
+       EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+                           state->search_depth[EFX_FILTER_RX_UDP_FULL] +
+                           FILTER_CTL_SRCH_FUDGE_FULL);
+       EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+                           state->search_depth[EFX_FILTER_RX_UDP_WILD] +
+                           FILTER_CTL_SRCH_FUDGE_WILD);
+
+       if (state->table[EFX_FILTER_TABLE_RX_MAC].size) {
+               EFX_SET_OWORD_FIELD(
+                       filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+                       state->search_depth[EFX_FILTER_RX_MAC_FULL] +
+                       FILTER_CTL_SRCH_FUDGE_FULL);
+               EFX_SET_OWORD_FIELD(
+                       filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+                       state->search_depth[EFX_FILTER_RX_MAC_WILD] +
+                       FILTER_CTL_SRCH_FUDGE_WILD);
+       }
+
+       efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
+{
+       u32 data3;
+
+       switch (efx_filter_type_table_id(spec->type)) {
+       case EFX_FILTER_TABLE_RX_IP: {
+               bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL ||
+                              spec->type == EFX_FILTER_RX_UDP_WILD);
+               EFX_POPULATE_OWORD_7(
+                       *filter,
+                       FRF_BZ_RSS_EN,
+                       !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+                       FRF_BZ_SCATTER_EN,
+                       !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+                       FRF_BZ_TCP_UDP, is_udp,
+                       FRF_BZ_RXQ_ID, spec->dmaq_id,
+                       EFX_DWORD_2, spec->data[2],
+                       EFX_DWORD_1, spec->data[1],
+                       EFX_DWORD_0, spec->data[0]);
+               data3 = is_udp;
+               break;
+       }
+
+       case EFX_FILTER_TABLE_RX_MAC: {
+               bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD;
+               EFX_POPULATE_OWORD_8(
+                       *filter,
+                       FRF_CZ_RMFT_RSS_EN,
+                       !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+                       FRF_CZ_RMFT_SCATTER_EN,
+                       !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+                       FRF_CZ_RMFT_IP_OVERRIDE,
+                       !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
+                       FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+                       FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+                       FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+                       FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+                       FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+               data3 = is_wild;
+               break;
+       }
+
+       default:
+               BUG();
+       }
+
+       return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
+
+static bool efx_filter_equal(const struct efx_filter_spec *left,
+                            const struct efx_filter_spec *right)
+{
+       if (left->type != right->type ||
+           memcmp(left->data, right->data, sizeof(left->data)))
+               return false;
+
+       return true;
+}
+
+static int efx_filter_search(struct efx_filter_table *table,
+                            struct efx_filter_spec *spec, u32 key,
+                            bool for_insert, int *depth_required)
+{
+       unsigned hash, incr, filter_idx, depth;
+       struct efx_filter_spec *cmp;
+
+       hash = efx_filter_hash(key);
+       incr = efx_filter_increment(key);
+
+       for (depth = 1, filter_idx = hash & (table->size - 1);
+            test_bit(filter_idx, table->used_bitmap);
+            ++depth) {
+               cmp = &table->spec[filter_idx];
+               if (efx_filter_equal(spec, cmp))
+                       goto found;
+               filter_idx = (filter_idx + incr) & (table->size - 1);
+       }
+       if (!for_insert)
+               return -ENOENT;
+found:
+       *depth_required = depth;
+       return filter_idx;
+}
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace: Flag for whether the specified filter may replace a filter
+ *     with an identical match expression and equal or lower priority
+ *
+ * On success, return the filter index within its table.
+ * On failure, return a negative error code.
+ */
+int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
+                            bool replace)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       enum efx_filter_table_id table_id =
+               efx_filter_type_table_id(spec->type);
+       struct efx_filter_table *table = &state->table[table_id];
+       struct efx_filter_spec *saved_spec;
+       efx_oword_t filter;
+       int filter_idx, depth;
+       u32 key;
+       int rc;
+
+       if (table->size == 0)
+               return -EINVAL;
+
+       key = efx_filter_build(&filter, spec);
+
+       netif_vdbg(efx, hw, efx->net_dev,
+                  "%s: type %d search_depth=%d", __func__, spec->type,
+                  state->search_depth[spec->type]);
+
+       spin_lock_bh(&state->lock);
+
+       rc = efx_filter_search(table, spec, key, true, &depth);
+       if (rc < 0)
+               goto out;
+       filter_idx = rc;
+       BUG_ON(filter_idx >= table->size);
+       saved_spec = &table->spec[filter_idx];
+
+       if (test_bit(filter_idx, table->used_bitmap)) {
+               /* Should we replace the existing filter? */
+               if (!replace) {
+                       rc = -EEXIST;
+                       goto out;
+               }
+               if (spec->priority < saved_spec->priority) {
+                       rc = -EPERM;
+                       goto out;
+               }
+       } else {
+               __set_bit(filter_idx, table->used_bitmap);
+               ++table->used;
+       }
+       *saved_spec = *spec;
+
+       if (state->search_depth[spec->type] < depth) {
+               state->search_depth[spec->type] = depth;
+               efx_filter_push_rx_limits(efx);
+       }
+
+       efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+       netif_vdbg(efx, hw, efx->net_dev,
+                  "%s: filter type %d index %d rxq %u set",
+                  __func__, spec->type, filter_idx, spec->dmaq_id);
+
+out:
+       spin_unlock_bh(&state->lock);
+       return rc;
+}
+
+static void efx_filter_table_clear_entry(struct efx_nic *efx,
+                                        struct efx_filter_table *table,
+                                        int filter_idx)
+{
+       static efx_oword_t filter;
+
+       if (test_bit(filter_idx, table->used_bitmap)) {
+               __clear_bit(filter_idx, table->used_bitmap);
+               --table->used;
+               memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+               efx_writeo(efx, &filter,
+                          table->offset + table->step * filter_idx);
+       }
+}
+
+/**
+ * efx_filter_remove_filter - remove a filter by specification
+ * @efx: NIC from which to remove the filter
+ * @spec: Specification for the filter
+ *
+ * On success, return zero.
+ * On failure, return a negative error code.
+ */
+int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       enum efx_filter_table_id table_id =
+               efx_filter_type_table_id(spec->type);
+       struct efx_filter_table *table = &state->table[table_id];
+       struct efx_filter_spec *saved_spec;
+       efx_oword_t filter;
+       int filter_idx, depth;
+       u32 key;
+       int rc;
+
+       key = efx_filter_build(&filter, spec);
+
+       spin_lock_bh(&state->lock);
+
+       rc = efx_filter_search(table, spec, key, false, &depth);
+       if (rc < 0)
+               goto out;
+       filter_idx = rc;
+       saved_spec = &table->spec[filter_idx];
+
+       if (spec->priority < saved_spec->priority) {
+               rc = -EPERM;
+               goto out;
+       }
+
+       efx_filter_table_clear_entry(efx, table, filter_idx);
+       if (table->used == 0)
+               efx_filter_table_reset_search_depth(state, table_id);
+       rc = 0;
+
+out:
+       spin_unlock_bh(&state->lock);
+       return rc;
+}
+
+/**
+ * efx_filter_table_clear - remove filters from a table by priority
+ * @efx: NIC from which to remove the filters
+ * @table_id: Table from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+void efx_filter_table_clear(struct efx_nic *efx,
+                           enum efx_filter_table_id table_id,
+                           enum efx_filter_priority priority)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       struct efx_filter_table *table = &state->table[table_id];
+       int filter_idx;
+
+       spin_lock_bh(&state->lock);
+
+       for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
+               if (table->spec[filter_idx].priority <= priority)
+                       efx_filter_table_clear_entry(efx, table, filter_idx);
+       if (table->used == 0)
+               efx_filter_table_reset_search_depth(state, table_id);
+
+       spin_unlock_bh(&state->lock);
+}
+
+/* Restore filter state after reset */
+void efx_restore_filters(struct efx_nic *efx)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       enum efx_filter_table_id table_id;
+       struct efx_filter_table *table;
+       efx_oword_t filter;
+       int filter_idx;
+
+       spin_lock_bh(&state->lock);
+
+       for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+               table = &state->table[table_id];
+               for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+                       if (!test_bit(filter_idx, table->used_bitmap))
+                               continue;
+                       efx_filter_build(&filter, &table->spec[filter_idx]);
+                       efx_writeo(efx, &filter,
+                                  table->offset + table->step * filter_idx);
+               }
+       }
+
+       efx_filter_push_rx_limits(efx);
+
+       spin_unlock_bh(&state->lock);
+}
+
+int efx_probe_filters(struct efx_nic *efx)
+{
+       struct efx_filter_state *state;
+       struct efx_filter_table *table;
+       unsigned table_id;
+
+       state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+       efx->filter_state = state;
+
+       spin_lock_init(&state->lock);
+
+       if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+               table = &state->table[EFX_FILTER_TABLE_RX_IP];
+               table->offset = FR_BZ_RX_FILTER_TBL0;
+               table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+               table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+       }
+
+       if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+               table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+               table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+               table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+               table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+       }
+
+       for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+               table = &state->table[table_id];
+               if (table->size == 0)
+                       continue;
+               table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+                                            sizeof(unsigned long),
+                                            GFP_KERNEL);
+               if (!table->used_bitmap)
+                       goto fail;
+               table->spec = vmalloc(table->size * sizeof(*table->spec));
+               if (!table->spec)
+                       goto fail;
+               memset(table->spec, 0, table->size * sizeof(*table->spec));
+       }
+
+       return 0;
+
+fail:
+       efx_remove_filters(efx);
+       return -ENOMEM;
+}
+
+void efx_remove_filters(struct efx_nic *efx)
+{
+       struct efx_filter_state *state = efx->filter_state;
+       enum efx_filter_table_id table_id;
+
+       for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+               kfree(state->table[table_id].used_bitmap);
+               vfree(state->table[table_id].spec);
+       }
+       kfree(state);
+}
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
new file mode 100644 (file)
index 0000000..a53319d
--- /dev/null
@@ -0,0 +1,189 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_FILTER_H
+#define EFX_FILTER_H
+
+#include <linux/types.h>
+
+enum efx_filter_table_id {
+       EFX_FILTER_TABLE_RX_IP = 0,
+       EFX_FILTER_TABLE_RX_MAC,
+       EFX_FILTER_TABLE_COUNT,
+};
+
+/**
+ * enum efx_filter_type - type of hardware filter
+ * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple
+ * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port)
+ * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple
+ * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port)
+ * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID
+ * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address
+ *
+ * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types.
+ */
+enum efx_filter_type {
+       EFX_FILTER_RX_TCP_FULL = 0,
+       EFX_FILTER_RX_TCP_WILD,
+       EFX_FILTER_RX_UDP_FULL,
+       EFX_FILTER_RX_UDP_WILD,
+       EFX_FILTER_RX_MAC_FULL = 4,
+       EFX_FILTER_RX_MAC_WILD,
+       EFX_FILTER_TYPE_COUNT,
+};
+
+/**
+ * enum efx_filter_priority - priority of a hardware filter specification
+ * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
+ */
+enum efx_filter_priority {
+       EFX_FILTER_PRI_HINT = 0,
+       EFX_FILTER_PRI_MANUAL,
+       EFX_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum efx_filter_flags - flags for hardware filter specifications
+ * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ *     By default, matching packets will be delivered only to the
+ *     specified queue. If this flag is set, they will be delivered
+ *     to a range of queues offset from the specified queue number
+ *     according to the indirection table.
+ * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ *     queue.
+ * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
+ *     any IP filter that matches the same packet.  By default, IP
+ *     filters take precedence.
+ *
+ * Currently, no flags are defined for TX filters.
+ */
+enum efx_filter_flags {
+       EFX_FILTER_FLAG_RX_RSS = 0x01,
+       EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+       EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
+};
+
+/**
+ * struct efx_filter_spec - specification for a hardware filter
+ * @type: Type of match to be performed, from &enum efx_filter_type
+ * @priority: Priority of the filter, from &enum efx_filter_priority
+ * @flags: Miscellaneous flags, from &enum efx_filter_flags
+ * @dmaq_id: Source/target queue index
+ * @data: Match data (type-dependent)
+ *
+ * Use the efx_filter_set_*() functions to initialise the @type and
+ * @data fields.
+ */
+struct efx_filter_spec {
+       u8      type:4;
+       u8      priority:4;
+       u8      flags;
+       u16     dmaq_id;
+       u32     data[3];
+};
+
+/**
+ * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match
+ * @spec: Specification to initialise
+ * @shost: Source host address (host byte order)
+ * @sport: Source port (host byte order)
+ * @dhost: Destination host address (host byte order)
+ * @dport: Destination port (host byte order)
+ */
+static inline void
+efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
+                          u32 shost, u16 sport, u32 dhost, u16 dport)
+{
+       spec->type = EFX_FILTER_RX_TCP_FULL;
+       spec->data[0] = sport | shost << 16;
+       spec->data[1] = dport << 16 | shost >> 16;
+       spec->data[2] = dhost;
+}
+
+/**
+ * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
+ * @spec: Specification to initialise
+ * @dhost: Destination host address (host byte order)
+ * @dport: Destination port (host byte order)
+ */
+static inline void
+efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
+{
+       spec->type = EFX_FILTER_RX_TCP_WILD;
+       spec->data[0] = 0;
+       spec->data[1] = dport << 16;
+       spec->data[2] = dhost;
+}
+
+/**
+ * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
+ * @spec: Specification to initialise
+ * @shost: Source host address (host byte order)
+ * @sport: Source port (host byte order)
+ * @dhost: Destination host address (host byte order)
+ * @dport: Destination port (host byte order)
+ */
+static inline void
+efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
+                          u32 shost, u16 sport, u32 dhost, u16 dport)
+{
+       spec->type = EFX_FILTER_RX_UDP_FULL;
+       spec->data[0] = sport | shost << 16;
+       spec->data[1] = dport << 16 | shost >> 16;
+       spec->data[2] = dhost;
+}
+
+/**
+ * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
+ * @spec: Specification to initialise
+ * @dhost: Destination host address (host byte order)
+ * @dport: Destination port (host byte order)
+ */
+static inline void
+efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
+{
+       spec->type = EFX_FILTER_RX_UDP_WILD;
+       spec->data[0] = dport;
+       spec->data[1] = 0;
+       spec->data[2] = dhost;
+}
+
+/**
+ * efx_filter_set_rx_mac_full - specify RX filter with MAC full match
+ * @spec: Specification to initialise
+ * @vid: VLAN ID
+ * @addr: Destination MAC address
+ */
+static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec,
+                                             u16 vid, const u8 *addr)
+{
+       spec->type = EFX_FILTER_RX_MAC_FULL;
+       spec->data[0] = vid;
+       spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
+       spec->data[2] = addr[0] << 8 | addr[1];
+}
+
+/**
+ * efx_filter_set_rx_mac_full - specify RX filter with MAC wildcard match
+ * @spec: Specification to initialise
+ * @addr: Destination MAC address
+ */
+static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
+                                             const u8 *addr)
+{
+       spec->type = EFX_FILTER_RX_MAC_WILD;
+       spec->data[0] = 0;
+       spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
+       spec->data[2] = addr[0] << 8 | addr[1];
+}
+
+#endif /* EFX_FILTER_H */
index 152342dbff298b66c3ee679109fead85cc84cf38..b3f2bf436735206ba914565ab0d26754ced4e0cc 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/device.h>
 #include <linux/highmem.h>
 #include <linux/workqueue.h>
+#include <linux/vmalloc.h>
 #include <linux/i2c.h>
 
 #include "enum.h"
@@ -619,6 +620,8 @@ union efx_multicast_hash {
        efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
 };
 
+struct efx_filter_state;
+
 /**
  * struct efx_nic - an Efx NIC
  * @name: Device name (net device name or bus id before net device registered)
@@ -799,6 +802,8 @@ struct efx_nic {
        u64 loopback_modes;
 
        void *loopback_selftest;
+
+       struct efx_filter_state *filter_state;
 };
 
 static inline int efx_dev_registered(struct efx_nic *efx)
index 6c5c0cefa9d85451dc12f530cd83161b154e53ae..c4de0014441c9942ea4f94f714e4e3335804a11a 100644 (file)
@@ -1849,8 +1849,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
        REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
        REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
        REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
-       /* The register buffer is allocated with slab, so we can't
-        * reasonably read all of the buffer table (up to 8MB!).
+       /* We can't reasonably read all of the buffer table (up to 8MB!).
         * However this driver will only use a few entries.  Reading
         * 1K entries allows for some expansion of queue count and
         * size before we need to change the version. */
@@ -1858,7 +1857,6 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
                                  A, A, 8, 1024),
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
                                  B, Z, 8, 1024),
-       /* RX_FILTER_TBL{0,1} is huge and not used by this driver */
        REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
        REGISTER_TABLE_BB_CZ(TIMER_TBL),
        REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -1868,6 +1866,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
        REGISTER_TABLE_CZ(MC_TREG_SMEM),
        /* MSIX_PBA_TABLE is not mapped */
        /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+       REGISTER_TABLE_BZ(RX_FILTER_TBL0),
 };
 
 size_t efx_nic_get_regs_len(struct efx_nic *efx)
index 18a3be428348754028fc1556f0e06813f8081197..96430ed81c36712a3dec2134f35f241f91466a07 100644 (file)
 #define        FRF_AB_XX_FORCE_SIG_WIDTH 8
 #define        FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
 
+/* RX_MAC_FILTER_TBL0 */
+/* RMFT_DEST_MAC is wider than 32 bits */
+#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0 */
+/* TMFT_SRC_MAC is wider than 32 bits */
+#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
index 9f5368049694d18d9cf39faab3eb5526a0799420..2115f95ddc88b7337a892e97a4407f43d73c79f8 100644 (file)
@@ -651,6 +651,6 @@ struct efx_nic_type siena_a0_nic_type = {
        .tx_dc_base = 0x88000,
        .rx_dc_base = 0x68000,
        .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                            NETIF_F_RXHASH),
+                            NETIF_F_RXHASH | NETIF_F_NTUPLE),
        .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
 };
index 7dfdbee878e83d4b8019a1df9e1bcd7eb0175dbe..8b5aeca24d5db9860f3e027a899746afece41e8a 100644 (file)
@@ -96,6 +96,7 @@ static char *media[MAX_UNITS];
 #include <asm/io.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
 #ifndef _COMPAT_WITH_OLD_KERNEL
 #include <linux/crc32.h>
 #include <linux/ethtool.h>
@@ -523,13 +524,15 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
        tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
        tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 
-       ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
+                       &ring_dma, GFP_KERNEL);
        if (!ring_space)
                goto err_out_cleardev;
        np->tx_ring = (struct netdev_desc *)ring_space;
        np->tx_ring_dma = ring_dma;
 
-       ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
+                       &ring_dma, GFP_KERNEL);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = (struct netdev_desc *)ring_space;
@@ -663,9 +666,11 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 err_out_unregister:
        unregister_netdev(dev);
 err_out_unmap_rx:
-        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+       dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+               np->rx_ring, np->rx_ring_dma);
 err_out_unmap_tx:
-        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+       dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+               np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
        pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
@@ -1011,8 +1016,14 @@ static void init_ring(struct net_device *dev)
                skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                np->rx_ring[i].frag[0].addr = cpu_to_le32(
-                       pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
-                               PCI_DMA_FROMDEVICE));
+                       dma_map_single(&np->pci_dev->dev, skb->data,
+                               np->rx_buf_sz, DMA_FROM_DEVICE));
+               if (dma_mapping_error(&np->pci_dev->dev,
+                                       np->rx_ring[i].frag[0].addr)) {
+                       dev_kfree_skb(skb);
+                       np->rx_skbuff[i] = NULL;
+                       break;
+               }
                np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
        }
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1063,9 +1074,11 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 
        txdesc->next_desc = 0;
        txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
-       txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
-                                                       skb->len,
-                                                       PCI_DMA_TODEVICE));
+       txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+                               skb->data, skb->len, DMA_TO_DEVICE));
+       if (dma_mapping_error(&np->pci_dev->dev,
+                               txdesc->frag[0].addr))
+                       goto drop_frame;
        txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
 
        /* Increment cur_tx before tasklet_schedule() */
@@ -1087,6 +1100,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
                        dev->name, np->cur_tx, entry);
        }
        return NETDEV_TX_OK;
+
+drop_frame:
+       dev_kfree_skb(skb);
+       np->tx_skbuff[entry] = NULL;
+       dev->stats.tx_dropped++;
+       return NETDEV_TX_OK;
 }
 
 /* Reset hardware tx and free all of tx buffers */
@@ -1097,7 +1116,6 @@ reset_tx (struct net_device *dev)
        void __iomem *ioaddr = np->base;
        struct sk_buff *skb;
        int i;
-       int irq = in_interrupt();
 
        /* Reset tx logic, TxListPtr will be cleaned */
        iowrite16 (TxDisable, ioaddr + MACCtrl1);
@@ -1109,13 +1127,10 @@ reset_tx (struct net_device *dev)
 
                skb = np->tx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(np->pci_dev,
+                       dma_unmap_single(&np->pci_dev->dev,
                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
-                               skb->len, PCI_DMA_TODEVICE);
-                       if (irq)
-                               dev_kfree_skb_irq (skb);
-                       else
-                               dev_kfree_skb (skb);
+                               skb->len, DMA_TO_DEVICE);
+                       dev_kfree_skb_any(skb);
                        np->tx_skbuff[i] = NULL;
                        dev->stats.tx_dropped++;
                }
@@ -1233,9 +1248,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                                                break;
                                skb = np->tx_skbuff[entry];
                                /* Free the original skb. */
-                               pci_unmap_single(np->pci_dev,
+                               dma_unmap_single(&np->pci_dev->dev,
                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
-                                       skb->len, PCI_DMA_TODEVICE);
+                                       skb->len, DMA_TO_DEVICE);
                                dev_kfree_skb_irq (np->tx_skbuff[entry]);
                                np->tx_skbuff[entry] = NULL;
                                np->tx_ring[entry].frag[0].addr = 0;
@@ -1252,9 +1267,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                                        break;
                                skb = np->tx_skbuff[entry];
                                /* Free the original skb. */
-                               pci_unmap_single(np->pci_dev,
+                               dma_unmap_single(&np->pci_dev->dev,
                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
-                                       skb->len, PCI_DMA_TODEVICE);
+                                       skb->len, DMA_TO_DEVICE);
                                dev_kfree_skb_irq (np->tx_skbuff[entry]);
                                np->tx_skbuff[entry] = NULL;
                                np->tx_ring[entry].frag[0].addr = 0;
@@ -1334,22 +1349,18 @@ static void rx_poll(unsigned long data)
                        if (pkt_len < rx_copybreak &&
                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
-                               pci_dma_sync_single_for_cpu(np->pci_dev,
-                                                           le32_to_cpu(desc->frag[0].addr),
-                                                           np->rx_buf_sz,
-                                                           PCI_DMA_FROMDEVICE);
-
+                               dma_sync_single_for_cpu(&np->pci_dev->dev,
+                                               le32_to_cpu(desc->frag[0].addr),
+                                               np->rx_buf_sz, DMA_FROM_DEVICE);
                                skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
-                               pci_dma_sync_single_for_device(np->pci_dev,
-                                                              le32_to_cpu(desc->frag[0].addr),
-                                                              np->rx_buf_sz,
-                                                              PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(&np->pci_dev->dev,
+                                               le32_to_cpu(desc->frag[0].addr),
+                                               np->rx_buf_sz, DMA_FROM_DEVICE);
                                skb_put(skb, pkt_len);
                        } else {
-                               pci_unmap_single(np->pci_dev,
+                               dma_unmap_single(&np->pci_dev->dev,
                                        le32_to_cpu(desc->frag[0].addr),
-                                       np->rx_buf_sz,
-                                       PCI_DMA_FROMDEVICE);
+                                       np->rx_buf_sz, DMA_FROM_DEVICE);
                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                                np->rx_skbuff[entry] = NULL;
                        }
@@ -1396,8 +1407,14 @@ static void refill_rx (struct net_device *dev)
                        skb->dev = dev;         /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        np->rx_ring[entry].frag[0].addr = cpu_to_le32(
-                               pci_map_single(np->pci_dev, skb->data,
-                                       np->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                               dma_map_single(&np->pci_dev->dev, skb->data,
+                                       np->rx_buf_sz, DMA_FROM_DEVICE));
+                       if (dma_mapping_error(&np->pci_dev->dev,
+                                   np->rx_ring[entry].frag[0].addr)) {
+                           dev_kfree_skb_irq(skb);
+                           np->rx_skbuff[entry] = NULL;
+                           break;
+                       }
                }
                /* Perhaps we need not reset this field. */
                np->rx_ring[entry].frag[0].length =
@@ -1715,9 +1732,9 @@ static int netdev_close(struct net_device *dev)
                np->rx_ring[i].status = 0;
                skb = np->rx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(np->pci_dev,
+                       dma_unmap_single(&np->pci_dev->dev,
                                le32_to_cpu(np->rx_ring[i].frag[0].addr),
-                               np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                               np->rx_buf_sz, DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        np->rx_skbuff[i] = NULL;
                }
@@ -1727,9 +1744,9 @@ static int netdev_close(struct net_device *dev)
                np->tx_ring[i].next_desc = 0;
                skb = np->tx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(np->pci_dev,
+                       dma_unmap_single(&np->pci_dev->dev,
                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
-                               skb->len, PCI_DMA_TODEVICE);
+                               skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb(skb);
                        np->tx_skbuff[i] = NULL;
                }
@@ -1743,17 +1760,16 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
        struct net_device *dev = pci_get_drvdata(pdev);
 
        if (dev) {
-               struct netdev_private *np = netdev_priv(dev);
-
-               unregister_netdev(dev);
-               pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
-                       np->rx_ring_dma);
-               pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
-                       np->tx_ring_dma);
-               pci_iounmap(pdev, np->base);
-               pci_release_regions(pdev);
-               free_netdev(dev);
-               pci_set_drvdata(pdev, NULL);
+           struct netdev_private *np = netdev_priv(dev);
+           unregister_netdev(dev);
+           dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+                   np->rx_ring, np->rx_ring_dma);
+           dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+                   np->tx_ring, np->tx_ring_dma);
+           pci_iounmap(pdev, np->base);
+           pci_release_regions(pdev);
+           free_netdev(dev);
+           pci_set_drvdata(pdev, NULL);
        }
 }
 
index d4c7c0c0a3d64f405be97a242500a1a77d4df6fa..d3e788a9cd1c9ea90c29fd41f31d37e1dd5f9ebe 100644 (file)
@@ -125,18 +125,16 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
        dev->irq        = pci_irq_line;
        dev->dma        = 0;
 
-       printk("%s: %s\n", dev->name, cardinfo->name);
-       printk("%s:    IO: %#4lx  IRQ: %d\n",
-              dev->name, dev->base_addr, dev->irq);
+       dev_info(&pdev->dev, "%s\n", cardinfo->name);
+       dev_info(&pdev->dev, "    IO: %#4lx  IRQ: %d\n", dev->base_addr, dev->irq);
                
        tms_pci_read_eeprom(dev);
 
-       printk("%s:    Ring Station Address: %pM\n",
-              dev->name, dev->dev_addr);
+       dev_info(&pdev->dev, "    Ring Station Address: %pM\n", dev->dev_addr);
                
        ret = tmsdev_init(dev, &pdev->dev);
        if (ret) {
-               printk("%s: unable to get memory for dev->priv.\n", dev->name);
+               dev_info(&pdev->dev, "unable to get memory for dev->priv.\n");
                goto err_out_region;
        }
 
index 7434a8353e2301dcb548ad94965ef0e73390335f..7187bd8a75f62c23b13ac73f4416488a0e42a0c7 100644 (file)
@@ -165,8 +165,10 @@ enum {
        DCCPO_TIMESTAMP_ECHO = 42,
        DCCPO_ELAPSED_TIME = 43,
        DCCPO_MAX = 45,
-       DCCPO_MIN_CCID_SPECIFIC = 128,
-       DCCPO_MAX_CCID_SPECIFIC = 255,
+       DCCPO_MIN_RX_CCID_SPECIFIC = 128,       /* from sender to receiver */
+       DCCPO_MAX_RX_CCID_SPECIFIC = 191,
+       DCCPO_MIN_TX_CCID_SPECIFIC = 192,       /* from receiver to sender */
+       DCCPO_MAX_TX_CCID_SPECIFIC = 255,
 };
 /* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */
 #define DCCP_SINGLE_OPT_MAXLEN 253
index b67af60a8890538f9e1568f035f9fade93855433..8a3338ceb4380c397249198076ad61a57a578f3b 100644 (file)
@@ -14,6 +14,7 @@
 #define _LINUX_ETHTOOL_H
 
 #include <linux/types.h>
+#include <linux/if_ether.h>
 
 /* This should work for both 32 and 64 bit userland. */
 struct ethtool_cmd {
@@ -391,6 +392,7 @@ struct ethtool_rx_flow_spec {
                struct ethtool_ah_espip4_spec           ah_ip4_spec;
                struct ethtool_ah_espip4_spec           esp_ip4_spec;
                struct ethtool_usrip4_spec              usr_ip4_spec;
+               struct ethhdr                           ether_spec;
                __u8                                    hdata[72];
        } h_u, m_u;
        __u64           ring_cookie;
@@ -483,6 +485,7 @@ struct ethtool_rx_ntuple_flow_spec {
                struct ethtool_ah_espip4_spec           ah_ip4_spec;
                struct ethtool_ah_espip4_spec           esp_ip4_spec;
                struct ethtool_usrip4_spec              usr_ip4_spec;
+               struct ethhdr                           ether_spec;
                __u8                                    hdata[72];
        } h_u, m_u;
 
@@ -492,11 +495,12 @@ struct ethtool_rx_ntuple_flow_spec {
        __u64           data_mask;
 
        __s32           action;
-#define ETHTOOL_RXNTUPLE_ACTION_DROP -1                /* drop packet */
+#define ETHTOOL_RXNTUPLE_ACTION_DROP   (-1)    /* drop packet */
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR  (-2)    /* clear filter */
 };
 
 /**
- * struct ethtool_rx_ntuple - command to set RX flow filter
+ * struct ethtool_rx_ntuple - command to set or clear RX flow filter
  * @cmd: Command number - %ETHTOOL_SRXNTUPLE
  * @fs: Flow filter specification
  */
@@ -840,7 +844,7 @@ struct ethtool_ops {
 #define WAKE_MAGIC             (1 << 5)
 #define WAKE_MAGICSECURE       (1 << 6) /* only meaningful if WAKE_MAGIC */
 
-/* L3-L4 network traffic flow types */
+/* L2-L4 network traffic flow types */
 #define        TCP_V4_FLOW     0x01    /* hash or spec (tcp_ip4_spec) */
 #define        UDP_V4_FLOW     0x02    /* hash or spec (udp_ip4_spec) */
 #define        SCTP_V4_FLOW    0x03    /* hash or spec (sctp_ip4_spec) */
@@ -856,6 +860,7 @@ struct ethtool_ops {
 #define        IP_USER_FLOW    0x0d    /* spec only (usr_ip4_spec) */
 #define        IPV4_FLOW       0x10    /* hash only */
 #define        IPV6_FLOW       0x11    /* hash only */
+#define        ETHER_FLOW      0x12    /* spec only (ether_spec) */
 
 /* L3-L4 network traffic flow hash options */
 #define        RXH_L2DA        (1 << 1)
index 29bcd55851ebed4dd2113965c2277f1e4c6bb9e6..397921b09ef9b792153e84fa8af24c8a96c98dc1 100644 (file)
@@ -204,7 +204,7 @@ struct pppox_proto {
        struct module   *owner;
 };
 
-extern int register_pppox_proto(int proto_num, struct pppox_proto *pp);
+extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
 extern void unregister_pppox_proto(int proto_num);
 extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
 extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
index 45375b41a2a0d44e62a370813ab4fdcbe046e3b3..7d178a758acff984e16cb71562c979a602c8459d 100644 (file)
@@ -174,20 +174,32 @@ extern int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
 extern int register_inet6addr_notifier(struct notifier_block *nb);
 extern int unregister_inet6addr_notifier(struct notifier_block *nb);
 
-static inline struct inet6_dev *
-__in6_dev_get(struct net_device *dev)
+/**
+ * __in6_dev_get - get inet6_dev pointer from netdevice
+ * @dev: network device
+ *
+ * Caller must hold rcu_read_lock or RTNL, because this function
+ * does not take a reference on the inet6_dev.
+ */
+static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
 {
-       return rcu_dereference_check(dev->ip6_ptr,
-                                    rcu_read_lock_held() ||
-                                    lockdep_rtnl_is_held());
+       return rcu_dereference_rtnl(dev->ip6_ptr);
 }
 
-static inline struct inet6_dev *
-in6_dev_get(struct net_device *dev)
+/**
+ * in6_dev_get - get inet6_dev pointer from netdevice
+ * @dev: network device
+ *
+ * This version can be used in any context, and takes a reference
+ * on the inet6_dev. Callers must use in6_dev_put() later to
+ * release this reference.
+ */
+static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
 {
-       struct inet6_dev *idev = NULL;
+       struct inet6_dev *idev;
+
        rcu_read_lock();
-       idev = __in6_dev_get(dev);
+       idev = rcu_dereference(dev->ip6_ptr);
        if (idev)
                atomic_inc(&idev->refcnt);
        rcu_read_unlock();
@@ -196,16 +208,21 @@ in6_dev_get(struct net_device *dev)
 
 extern void in6_dev_finish_destroy(struct inet6_dev *idev);
 
-static inline void
-in6_dev_put(struct inet6_dev *idev)
+static inline void in6_dev_put(struct inet6_dev *idev)
 {
        if (atomic_dec_and_test(&idev->refcnt))
                in6_dev_finish_destroy(idev);
 }
 
-#define __in6_dev_put(idev)  atomic_dec(&(idev)->refcnt)
-#define in6_dev_hold(idev)   atomic_inc(&(idev)->refcnt)
+static inline void __in6_dev_put(struct inet6_dev *idev)
+{
+       atomic_dec(&idev->refcnt);
+}
 
+static inline void in6_dev_hold(struct inet6_dev *idev)
+{
+       atomic_inc(&idev->refcnt);
+}
 
 extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
 
@@ -215,9 +232,15 @@ static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
                inet6_ifa_finish_destroy(ifp);
 }
 
-#define __in6_ifa_put(ifp)     atomic_dec(&(ifp)->refcnt)
-#define in6_ifa_hold(ifp)      atomic_inc(&(ifp)->refcnt)
+static inline void __in6_ifa_put(struct inet6_ifaddr *ifp)
+{
+       atomic_dec(&ifp->refcnt);
+}
 
+static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
+{
+       atomic_inc(&ifp->refcnt);
+}
 
 
 /*
@@ -240,23 +263,23 @@ static inline int ipv6_addr_is_multicast(const struct in6_addr *addr)
 
 static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
 {
-       return (((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+       return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
                addr->s6_addr32[1] | addr->s6_addr32[2] |
-               (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0);
+               (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0;
 }
 
 static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
 {
-       return (((addr->s6_addr32[0] ^ htonl(0xff020000)) |
+       return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
                addr->s6_addr32[1] | addr->s6_addr32[2] |
-               (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0);
+               (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0;
 }
 
 extern int __ipv6_isatap_ifid(u8 *eui, __be32 addr);
 
 static inline int ipv6_addr_is_isatap(const struct in6_addr *addr)
 {
-       return ((addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE));
+       return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
 }
 
 #ifdef CONFIG_PROC_FS
index 8d9503ad01daa65729212de33f579b32aeffc1dc..b26ce343072c65a6a1776e1b42d35a0373f40fd6 100644 (file)
@@ -25,6 +25,7 @@ struct vlan_priority_tci_mapping {
  *     @rx_multicast: number of received multicast packets
  *     @syncp: synchronization point for 64bit counters
  *     @rx_errors: number of errors
+ *     @rx_dropped: number of dropped packets
  */
 struct vlan_rx_stats {
        u64                     rx_packets;
@@ -32,6 +33,7 @@ struct vlan_rx_stats {
        u64                     rx_multicast;
        struct u64_stats_sync   syncp;
        unsigned long           rx_errors;
+       unsigned long           rx_dropped;
 };
 
 /**
index 3bccdd12a2642a06e1c5078b5c4065419a57428a..94a1feddeb4961482e26cc4b23ce3911de35d65a 100644 (file)
@@ -225,7 +225,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       netif_rx(skb);
+       if (unlikely(netif_rx(skb) == NET_RX_DROP)) {
+               if (rx_stats)
+                       rx_stats->rx_dropped++;
+       }
        rcu_read_unlock();
        return NET_RX_SUCCESS;
 
@@ -843,13 +846,15 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
                        accum.rx_packets += rxpackets;
                        accum.rx_bytes   += rxbytes;
                        accum.rx_multicast += rxmulticast;
-                       /* rx_errors is an ulong, not protected by syncp */
+                       /* rx_errors, rx_dropped are ulong, not protected by syncp */
                        accum.rx_errors  += p->rx_errors;
+                       accum.rx_dropped += p->rx_dropped;
                }
                stats->rx_packets = accum.rx_packets;
                stats->rx_bytes   = accum.rx_bytes;
                stats->rx_errors  = accum.rx_errors;
                stats->multicast  = accum.rx_multicast;
+               stats->rx_dropped = accum.rx_dropped;
        }
        return stats;
 }
index 0fd01dd17c48c86339db4565a3b5b8c39a436b93..b99369a055d13df6414421a28cfaee5d000c6c72 100644 (file)
@@ -173,7 +173,7 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
        net = dev_net(dev);
        pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
        caifd = caif_get(dev);
-       if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+       if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
                return NET_RX_DROP;
 
        if (caifd->layer.up->receive(caifd->layer.up, pkt))
index fd1f5df0827c782a45c873691e6411da4240a446..4d918f8f4e67d4948636b707c979eb5a4d47effa 100644 (file)
@@ -30,9 +30,6 @@
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(AF_CAIF);
 
-#define CAIF_DEF_SNDBUF (4096*10)
-#define CAIF_DEF_RCVBUF (4096*100)
-
 /*
  * CAIF state is re-using the TCP socket states.
  * caif_states stored in sk_state reflect the state as reported by
@@ -159,9 +156,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
                (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
-               trace_printk("CAIF: %s(): "
-                       "sending flow OFF (queue len = %d %d)\n",
-                       __func__,
+               pr_debug("sending flow OFF (queue len = %d %d)\n",
                        atomic_read(&cf_sk->sk.sk_rmem_alloc),
                        sk_rcvbuf_lowwater(cf_sk));
                set_rx_flow_off(cf_sk);
@@ -174,9 +169,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                return err;
        if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
                set_rx_flow_off(cf_sk);
-               trace_printk("CAIF: %s(): "
-                       "sending flow OFF due to rmem_schedule\n",
-                       __func__);
+               pr_debug("sending flow OFF due to rmem_schedule\n");
                dbfs_atomic_inc(&cnt.num_rx_flow_off);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }
@@ -1122,10 +1115,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
        /* Store the protocol */
        sk->sk_protocol = (unsigned char) protocol;
 
-       /* Sendbuf dictates the amount of outbound packets not yet sent */
-       sk->sk_sndbuf = CAIF_DEF_SNDBUF;
-       sk->sk_rcvbuf = CAIF_DEF_RCVBUF;
-
        /*
         * Lock in order to try to stop someone from opening the socket
         * too early.
index ef93a131310b32096788fe2cee2b7ec4d7d753a5..41adafd1891422ab1aa32a95a46aee7d9ae77b82 100644 (file)
@@ -197,7 +197,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
        caif_assert(adap_layer != NULL);
        channel_id = adap_layer->id;
        if (adap_layer->dn == NULL || channel_id == 0) {
-               pr_err("adap_layer->id is 0\n");
+               pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
                ret = -ENOTCONN;
                goto end;
        }
index 86aac24b02256cb0413f36423b7c0a5212243eb9..84a422c989414d31c502de65d13b74d51c6a8f0c 100644 (file)
@@ -30,9 +30,6 @@
 #define CONNECT_TIMEOUT (5 * HZ)
 #define CAIF_NET_DEFAULT_QUEUE_LEN 500
 
-#undef pr_debug
-#define pr_debug pr_warn
-
 /*This list is protected by the rtnl lock. */
 static LIST_HEAD(chnl_net_list);
 
index 91ffce20c36b2f4bf4c87f95ce6fc6c528a8cb46..7d7e572cedc7edb36b54f28de1c327629746d5b4 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/netdevice.h>
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
+#include <linux/vmalloc.h>
 #include <linux/slab.h>
 
 /*
@@ -815,7 +816,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
        if (regs.len > reglen)
                regs.len = reglen;
 
-       regbuf = kmalloc(reglen, GFP_USER);
+       regbuf = vmalloc(reglen);
        if (!regbuf)
                return -ENOMEM;
 
@@ -830,7 +831,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
        ret = 0;
 
  out:
-       kfree(regbuf);
+       vfree(regbuf);
        return ret;
 }
 
index 386c2283f14ec580824e8cac355b7da568259abc..2c0df0f95b3d488c8e6f8818857f19c02726e200 100644 (file)
@@ -729,16 +729,14 @@ static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
        *num = 0;
 
        for (; i < maxlen; i++) {
+               int value;
                char c;
                *num <<= 4;
                if (get_user(c, &user_buffer[i]))
                        return -EFAULT;
-               if ((c >= '0') && (c <= '9'))
-                       *num |= c - '0';
-               else if ((c >= 'a') && (c <= 'f'))
-                       *num |= c - 'a' + 10;
-               else if ((c >= 'A') && (c <= 'F'))
-                       *num |= c - 'A' + 10;
+               value = hex_to_bin(c);
+               if (value >= 0)
+                       *num |= value;
                else
                        break;
        }
index f41854470539855adee387a4c488f4be66a017a2..ec6bb322f3722f8e4070e7eb66ddbc5d2a88548b 100644 (file)
@@ -92,18 +92,19 @@ EXPORT_SYMBOL(in_aton);
 
 static inline int xdigit2bin(char c, int delim)
 {
+       int val;
+
        if (c == delim || c == '\0')
                return IN6PTON_DELIM;
        if (c == ':')
                return IN6PTON_COLON_MASK;
        if (c == '.')
                return IN6PTON_DOT;
-       if (c >= '0' && c <= '9')
-               return (IN6PTON_XDIGIT | IN6PTON_DIGIT| (c - '0'));
-       if (c >= 'a' && c <= 'f')
-               return (IN6PTON_XDIGIT | (c - 'a' + 10));
-       if (c >= 'A' && c <= 'F')
-               return (IN6PTON_XDIGIT | (c - 'A' + 10));
+
+       val = hex_to_bin(c);
+       if (val >= 0)
+               return val | IN6PTON_XDIGIT | (val < 10 ? IN6PTON_DIGIT : 0);
+
        if (delim == -1)
                return IN6PTON_DELIM;
        return IN6PTON_UNKNOWN;
index 6df6f8ac963664e2174b55c890ce7f45cbe05128..6d16a9070ff0efdad03c61b4237f7c64601ee2e1 100644 (file)
@@ -62,18 +62,14 @@ struct ccid_operations {
        void            (*ccid_hc_tx_exit)(struct sock *sk);
        void            (*ccid_hc_rx_packet_recv)(struct sock *sk,
                                                  struct sk_buff *skb);
-       int             (*ccid_hc_rx_parse_options)(struct sock *sk,
-                                                   unsigned char option,
-                                                   unsigned char len, u16 idx,
-                                                   unsigned char* value);
+       int             (*ccid_hc_rx_parse_options)(struct sock *sk, u8 pkt,
+                                                   u8 opt, u8 *val, u8 len);
        int             (*ccid_hc_rx_insert_options)(struct sock *sk,
                                                     struct sk_buff *skb);
        void            (*ccid_hc_tx_packet_recv)(struct sock *sk,
                                                  struct sk_buff *skb);
-       int             (*ccid_hc_tx_parse_options)(struct sock *sk,
-                                                   unsigned char option,
-                                                   unsigned char len, u16 idx,
-                                                   unsigned char* value);
+       int             (*ccid_hc_tx_parse_options)(struct sock *sk, u8 pkt,
+                                                   u8 opt, u8 *val, u8 len);
        int             (*ccid_hc_tx_send_packet)(struct sock *sk,
                                                  struct sk_buff *skb);
        void            (*ccid_hc_tx_packet_sent)(struct sock *sk,
@@ -168,27 +164,31 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
                ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb);
 }
 
+/**
+ * ccid_hc_tx_parse_options  -  Parse CCID-specific options sent by the receiver
+ * @pkt: type of packet that @opt appears on (RFC 4340, 5.1)
+ * @opt: the CCID-specific option type (RFC 4340, 5.8 and 10.3)
+ * @val: value of @opt
+ * @len: length of @val in bytes
+ */
 static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
-                                          unsigned char option,
-                                          unsigned char len, u16 idx,
-                                          unsigned char* value)
+                                          u8 pkt, u8 opt, u8 *val, u8 len)
 {
-       int rc = 0;
-       if (ccid->ccid_ops->ccid_hc_tx_parse_options != NULL)
-               rc = ccid->ccid_ops->ccid_hc_tx_parse_options(sk, option, len, idx,
-                                                   value);
-       return rc;
+       if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+               return 0;
+       return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
 }
 
+/**
+ * ccid_hc_rx_parse_options  -  Parse CCID-specific options sent by the sender
+ * Arguments are analogous to ccid_hc_tx_parse_options()
+ */
 static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
-                                          unsigned char option,
-                                          unsigned char len, u16 idx,
-                                          unsigned char* value)
+                                          u8 pkt, u8 opt, u8 *val, u8 len)
 {
-       int rc = 0;
-       if (ccid->ccid_ops->ccid_hc_rx_parse_options != NULL)
-               rc = ccid->ccid_ops->ccid_hc_rx_parse_options(sk, option, len, idx, value);
-       return rc;
+       if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+               return 0;
+       return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
 }
 
 static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
index ce80591300707c86b91a213922b6b392973b3c40..c3f3a25bbd7aa4f111bda3f56f196e9e5a621f4e 100644 (file)
@@ -54,7 +54,6 @@ static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
        [TFRC_SSTATE_NO_SENT]  = "NO_SENT",
        [TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
        [TFRC_SSTATE_FBACK]    = "FBACK",
-       [TFRC_SSTATE_TERM]     = "TERM",
        };
 
        return ccid3_state_names[state];
@@ -208,10 +207,13 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
        ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
                       ccid3_tx_state_name(hc->tx_state));
 
+       /* Ignore and do not restart after leaving the established state */
+       if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
+               goto out;
+
+       /* Reset feedback state to "no feedback received" */
        if (hc->tx_state == TFRC_SSTATE_FBACK)
                ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
-       else if (hc->tx_state != TFRC_SSTATE_NO_FBACK)
-               goto out;
 
        /*
         * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
@@ -287,8 +289,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
        if (unlikely(skb->len == 0))
                return -EBADMSG;
 
-       switch (hc->tx_state) {
-       case TFRC_SSTATE_NO_SENT:
+       if (hc->tx_state == TFRC_SSTATE_NO_SENT) {
                sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
                               usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
                hc->tx_last_win_count   = 0;
@@ -323,9 +324,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
                ccid3_update_send_interval(hc);
 
                ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
-               break;
-       case TFRC_SSTATE_NO_FBACK:
-       case TFRC_SSTATE_FBACK:
+
+       } else {
                delay = ktime_us_delta(hc->tx_t_nom, now);
                ccid3_pr_debug("delay=%ld\n", (long)delay);
                /*
@@ -340,10 +340,6 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
                        return (u32)delay / USEC_PER_MSEC;
 
                ccid3_hc_tx_update_win_count(hc, now);
-               break;
-       case TFRC_SSTATE_TERM:
-               DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
-               return -EINVAL;
        }
 
        /* prepare to send now (add options etc.) */
@@ -369,21 +365,15 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
 static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
-       struct ccid3_options_received *opt_recv = &hc->tx_options_received;
        struct tfrc_tx_hist_entry *acked;
        ktime_t now;
        unsigned long t_nfb;
-       u32 pinv, r_sample;
+       u32 r_sample;
 
        /* we are only interested in ACKs */
        if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
              DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
                return;
-       /* ... and only in the established state */
-       if (hc->tx_state != TFRC_SSTATE_FBACK &&
-           hc->tx_state != TFRC_SSTATE_NO_FBACK)
-               return;
-
        /*
         * Locate the acknowledged packet in the TX history.
         *
@@ -403,17 +393,6 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
        r_sample  = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));
        hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
 
-       /* Update receive rate in units of 64 * bytes/second */
-       hc->tx_x_recv = opt_recv->ccid3or_receive_rate;
-       hc->tx_x_recv <<= 6;
-
-       /* Update loss event rate (which is scaled by 1e6) */
-       pinv = opt_recv->ccid3or_loss_event_rate;
-       if (pinv == ~0U || pinv == 0)          /* see RFC 4342, 8.5   */
-               hc->tx_p = 0;
-       else                                   /* can not exceed 100% */
-               hc->tx_p = scaled_div(1, pinv);
-
        /*
         * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3
         */
@@ -481,30 +460,36 @@ done_computing_x:
                           jiffies + usecs_to_jiffies(t_nfb));
 }
 
-static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
-                                    unsigned char len, u16 idx,
-                                    unsigned char *value)
+static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type,
+                                    u8 option, u8 *optval, u8 optlen)
 {
        struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
-       struct ccid3_options_received *opt_recv = &hc->tx_options_received;
        __be32 opt_val;
 
        switch (option) {
        case TFRC_OPT_RECEIVE_RATE:
        case TFRC_OPT_LOSS_EVENT_RATE:
-               if (unlikely(len != 4)) {
+               /* Must be ignored on Data packets, cf. RFC 4342 8.3 and 8.5 */
+               if (packet_type == DCCP_PKT_DATA)
+                       break;
+               if (unlikely(optlen != 4)) {
                        DCCP_WARN("%s(%p), invalid len %d for %u\n",
-                                 dccp_role(sk), sk, len, option);
+                                 dccp_role(sk), sk, optlen, option);
                        return -EINVAL;
                }
-               opt_val = ntohl(get_unaligned((__be32 *)value));
+               opt_val = ntohl(get_unaligned((__be32 *)optval));
 
                if (option == TFRC_OPT_RECEIVE_RATE) {
-                       opt_recv->ccid3or_receive_rate = opt_val;
+                       /* Receive Rate is kept in units of 64 bytes/second */
+                       hc->tx_x_recv = opt_val;
+                       hc->tx_x_recv <<= 6;
+
                        ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
                                       dccp_role(sk), sk, opt_val);
                } else {
-                       opt_recv->ccid3or_loss_event_rate = opt_val;
+                       /* Update the fixpoint Loss Event Rate fraction */
+                       hc->tx_p = tfrc_invert_loss_event_rate(opt_val);
+
                        ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
                                       dccp_role(sk), sk, opt_val);
                }
@@ -527,9 +512,7 @@ static void ccid3_hc_tx_exit(struct sock *sk)
 {
        struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
 
-       ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
        sk_stop_timer(sk, &hc->tx_no_feedback_timer);
-
        tfrc_tx_hist_purge(&hc->tx_hist);
 }
 
@@ -588,7 +571,6 @@ static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
        static const char *const ccid3_rx_state_names[] = {
        [TFRC_RSTATE_NO_DATA] = "NO_DATA",
        [TFRC_RSTATE_DATA]    = "DATA",
-       [TFRC_RSTATE_TERM]    = "TERM",
        };
 
        return ccid3_rx_state_names[state];
@@ -614,14 +596,9 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
-       ktime_t now;
+       ktime_t now = ktime_get_real();
        s64 delta = 0;
 
-       if (unlikely(hc->rx_state == TFRC_RSTATE_TERM))
-               return;
-
-       now = ktime_get_real();
-
        switch (fbtype) {
        case CCID3_FBACK_INITIAL:
                hc->rx_x_recv = 0;
@@ -825,8 +802,6 @@ static void ccid3_hc_rx_exit(struct sock *sk)
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
 
-       ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
-
        tfrc_rx_hist_purge(&hc->rx_hist);
        tfrc_lh_cleanup(&hc->rx_li_hist);
 }
@@ -851,8 +826,7 @@ static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
                        return -EINVAL;
                rx_info.tfrcrx_x_recv = hc->rx_x_recv;
                rx_info.tfrcrx_rtt    = hc->rx_rtt;
-               rx_info.tfrcrx_p      = hc->rx_pinv == 0 ? ~0U :
-                                          scaled_div(1, hc->rx_pinv);
+               rx_info.tfrcrx_p      = tfrc_invert_loss_event_rate(hc->rx_pinv);
                len = sizeof(rx_info);
                val = &rx_info;
                break;
index 9eb90b863abd5ae10e1105d15f302016a8366ff0..1a9933c29672bfa3d1e8396a33437a5712970b5d 100644 (file)
@@ -67,17 +67,11 @@ enum ccid3_options {
        TFRC_OPT_RECEIVE_RATE    = 194,
 };
 
-struct ccid3_options_received {
-       u32 ccid3or_loss_event_rate;
-       u32 ccid3or_receive_rate;
-};
-
 /* TFRC sender states */
 enum ccid3_hc_tx_states {
        TFRC_SSTATE_NO_SENT = 1,
        TFRC_SSTATE_NO_FBACK,
        TFRC_SSTATE_FBACK,
-       TFRC_SSTATE_TERM,
 };
 
 /**
@@ -98,7 +92,6 @@ enum ccid3_hc_tx_states {
  * @tx_t_ld:             Time last doubled during slow start
  * @tx_t_nom:            Nominal send time of next packet
  * @tx_hist:             Packet history
- * @tx_options_received:  Parsed set of retrieved options
  */
 struct ccid3_hc_tx_sock {
        u64                             tx_x;
@@ -116,7 +109,6 @@ struct ccid3_hc_tx_sock {
        ktime_t                         tx_t_ld;
        ktime_t                         tx_t_nom;
        struct tfrc_tx_hist_entry       *tx_hist;
-       struct ccid3_options_received   tx_options_received;
 };
 
 static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
@@ -130,7 +122,6 @@ static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
 enum ccid3_hc_rx_states {
        TFRC_RSTATE_NO_DATA = 1,
        TFRC_RSTATE_DATA,
-       TFRC_RSTATE_TERM    = 127,
 };
 
 /**
index 01bb48e96c2ed0b6955c50ee348817e1415f61fd..f8ee3f5497702c300c43c6529358c4dd5ee137f3 100644 (file)
@@ -57,6 +57,7 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
 
 extern u32  tfrc_calc_x(u16 s, u32 R, u32 p);
 extern u32  tfrc_calc_x_reverse_lookup(u32 fvalue);
+extern u32  tfrc_invert_loss_event_rate(u32 loss_event_rate);
 
 extern int  tfrc_tx_packet_history_init(void);
 extern void tfrc_tx_packet_history_exit(void);
index 22ca1cf0eb5503fe6ed2db69172771f0e207b182..a052a4377e262a6f2e1af8e354cfd649f006e343 100644 (file)
@@ -687,3 +687,17 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
        index = tfrc_binsearch(fvalue, 0);
        return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
 }
+
+/**
+ * tfrc_invert_loss_event_rate  -  Compute p so that 10^6 corresponds to 100%
+ * When @loss_event_rate is large, there is a chance that p is truncated to 0.
+ * To avoid re-entering slow-start in that case, we set p = TFRC_SMALLEST_P > 0.
+ */
+u32 tfrc_invert_loss_event_rate(u32 loss_event_rate)
+{
+       if (loss_event_rate == UINT_MAX)                /* see RFC 4342, 8.5 */
+               return 0;
+       if (unlikely(loss_event_rate == 0))             /* map 1/0 into 100% */
+               return 1000000;
+       return max_t(u32, scaled_div(1, loss_event_rate), TFRC_SMALLEST_P);
+}
index bfda087bd90dd792f8190840878379d304152c1d..92718511eac5af1d1896196c2591f5621dd22d38 100644 (file)
@@ -96,18 +96,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
                }
 
                /*
-                * CCID-Specific Options (from RFC 4340, sec. 10.3):
-                *
-                * Option numbers 128 through 191 are for options sent from the
-                * HC-Sender to the HC-Receiver; option numbers 192 through 255
-                * are for options sent from the HC-Receiver to the HC-Sender.
-                *
                 * CCID-specific options are ignored during connection setup, as
                 * negotiation may still be in progress (see RFC 4340, 10.3).
                 * The same applies to Ack Vectors, as these depend on the CCID.
-                *
                 */
-               if (dreq != NULL && (opt >= 128 ||
+               if (dreq != NULL && (opt >= DCCPO_MIN_RX_CCID_SPECIFIC ||
                    opt == DCCPO_ACK_VECTOR_0 || opt == DCCPO_ACK_VECTOR_1))
                        goto ignore_option;
 
@@ -226,23 +219,15 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
                        dccp_pr_debug("%s rx opt: ELAPSED_TIME=%d\n",
                                      dccp_role(sk), elapsed_time);
                        break;
-               case 128 ... 191: {
-                       const u16 idx = value - options;
-
+               case DCCPO_MIN_RX_CCID_SPECIFIC ... DCCPO_MAX_RX_CCID_SPECIFIC:
                        if (ccid_hc_rx_parse_options(dp->dccps_hc_rx_ccid, sk,
-                                                    opt, len, idx,
-                                                    value) != 0)
+                                                    pkt_type, opt, value, len))
                                goto out_invalid_option;
-               }
                        break;
-               case 192 ... 255: {
-                       const u16 idx = value - options;
-
+               case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
                        if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
-                                                    opt, len, idx,
-                                                    value) != 0)
+                                                    pkt_type, opt, value, len))
                                goto out_invalid_option;
-               }
                        break;
                default:
                        DCCP_CRIT("DCCP(%p): option %d(len=%d) not "
index fc20e687e933ba2557ab36bd1a3c57d8a9374d7f..714b6a80361df2cb616f2682cae04c20ad463b68 100644 (file)
@@ -647,9 +647,11 @@ static int ipgre_rcv(struct sk_buff *skb)
                skb_reset_network_header(skb);
                ipgre_ecn_decapsulate(iph, skb);
 
-               netif_rx(skb);
+               if (netif_rx(skb) == NET_RX_DROP)
+                       stats->rx_dropped++;
+
                rcu_read_unlock();
-               return(0);
+               return 0;
        }
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
index 8de8888dc95a62e4d567f85a6673d2c6aa9aa253..babd2527810694f28507da734ea3f3db7e665d7d 100644 (file)
@@ -377,7 +377,10 @@ static int ipip_rcv(struct sk_buff *skb)
                skb_tunnel_rx(skb, tunnel->dev);
 
                ipip_ecn_decapsulate(iph, skb);
-               netif_rx(skb);
+
+               if (netif_rx(skb) == NET_RX_DROP)
+                       tunnel->dev->stats.rx_dropped++;
+
                rcu_read_unlock();
                return 0;
        }
index 9289cecac4de291ba94487c04f3cdbb03313729b..f6d9f683543eaee96284139f3f2c8ca0936f7fb7 100644 (file)
@@ -725,7 +725,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
                skb_tunnel_rx(skb, t->dev);
 
                dscp_ecn_decapsulate(t, ipv6h, skb);
-               netif_rx(skb);
+
+               if (netif_rx(skb) == NET_RX_DROP)
+                       t->dev->stats.rx_dropped++;
+
                rcu_read_unlock();
                return 0;
        }
index 66078dad7fe8c353bd4de83115dc4eee73ba978f..2640c9be589dd62805dab83ed910ba4cbad694d6 100644 (file)
@@ -666,7 +666,9 @@ static int pim6_rcv(struct sk_buff *skb)
 
        skb_tunnel_rx(skb, reg_dev);
 
-       netif_rx(skb);
+       if (netif_rx(skb) == NET_RX_DROP)
+               reg_dev->stats.rx_dropped++;
+
        dev_put(reg_dev);
        return 0;
  drop:
index 6822481ff766ea4105b46f4c29889cf78e6ee9a3..8a039982223060f1a2e24dade7f03372ad448c49 100644 (file)
@@ -564,7 +564,10 @@ static int ipip6_rcv(struct sk_buff *skb)
                skb_tunnel_rx(skb, tunnel->dev);
 
                ipip6_ecn_decapsulate(iph, skb);
-               netif_rx(skb);
+
+               if (netif_rx(skb) == NET_RX_DROP)
+                       tunnel->dev->stats.rx_dropped++;
+
                rcu_read_unlock();
                return 0;
        }
index ff954b3e94b6b3d6daab8bf7a538c15101bad782..39a21d0c61c48f2506bc7d24e7eeeee1610a11ae 100644 (file)
@@ -1768,7 +1768,7 @@ static const struct proto_ops pppol2tp_ops = {
        .ioctl          = pppox_ioctl,
 };
 
-static struct pppox_proto pppol2tp_proto = {
+static const struct pppox_proto pppol2tp_proto = {
        .create         = pppol2tp_create,
        .ioctl          = pppol2tp_ioctl
 };