bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
author David S. Miller <davem@davemloft.net>
Tue, 15 Jun 2010 20:49:24 +0000 (13:49 -0700)
committer David S. Miller <davem@davemloft.net>
Tue, 15 Jun 2010 20:49:24 +0000 (13:49 -0700)
31 files changed:
drivers/net/ksz884x.c
drivers/net/macvlan.c
drivers/staging/batman-adv/hard-interface.c
include/linux/if.h
include/linux/netdevice.h
include/linux/netpoll.h
include/net/tcp.h
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_notify.c
net/bridge/br_private.h
net/bridge/br_stp_bpdu.c
net/bridge/netfilter/ebt_redirect.c
net/bridge/netfilter/ebt_ulog.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/netpoll.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/xt_TCPMSS.c
net/wireless/nl80211.c
net/wireless/util.c

index 7805bbf1d53a6ceeaf02f8c68f4e3740e2c4939d..62362b4a8c56951aeacf3dcd8597f5246a2d4abc 100644 (file)
@@ -5718,7 +5718,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
                 * from the bridge.
                 */
                if ((hw->features & STP_SUPPORT) && !promiscuous &&
-                               dev->br_port) {
+                   (dev->priv_flags & IFF_BRIDGE_PORT)) {
                        struct ksz_switch *sw = hw->ksz_switch;
                        int port = priv->port.first_port;
 
index 59c315556a3007635307b8aebf39c77521a8b960..e096875aa05527ac144856e7f593b36913a6b977 100644 (file)
@@ -40,6 +40,11 @@ struct macvlan_port {
        struct rcu_head         rcu;
 };
 
+#define macvlan_port_get_rcu(dev) \
+       ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
+#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
+#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
+
 static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
                                               const unsigned char *addr)
 {
@@ -155,7 +160,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
        struct net_device *dev;
        unsigned int len;
 
-       port = rcu_dereference(skb->dev->macvlan_port);
+       port = macvlan_port_get_rcu(skb->dev);
        if (is_multicast_ether_addr(eth->h_dest)) {
                src = macvlan_hash_lookup(port, eth->h_source);
                if (!src)
@@ -530,14 +535,12 @@ static int macvlan_port_create(struct net_device *dev)
        INIT_LIST_HEAD(&port->vlans);
        for (i = 0; i < MACVLAN_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&port->vlan_hash[i]);
-       rcu_assign_pointer(dev->macvlan_port, port);
 
-       err = netdev_rx_handler_register(dev, macvlan_handle_frame);
-       if (err) {
-               rcu_assign_pointer(dev->macvlan_port, NULL);
+       err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
+       if (err)
                kfree(port);
-       }
 
+       dev->priv_flags |= IFF_MACVLAN_PORT;
        return err;
 }
 
@@ -551,10 +554,10 @@ static void macvlan_port_rcu_free(struct rcu_head *head)
 
 static void macvlan_port_destroy(struct net_device *dev)
 {
-       struct macvlan_port *port = dev->macvlan_port;
+       struct macvlan_port *port = macvlan_port_get(dev);
 
+       dev->priv_flags &= ~IFF_MACVLAN_PORT;
        netdev_rx_handler_unregister(dev);
-       rcu_assign_pointer(dev->macvlan_port, NULL);
        call_rcu(&port->rcu, macvlan_port_rcu_free);
 }
 
@@ -633,12 +636,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        if (!tb[IFLA_ADDRESS])
                random_ether_addr(dev->dev_addr);
 
-       if (lowerdev->macvlan_port == NULL) {
+       if (!macvlan_port_exists(lowerdev)) {
                err = macvlan_port_create(lowerdev);
                if (err < 0)
                        return err;
        }
-       port = lowerdev->macvlan_port;
+       port = macvlan_port_get(lowerdev);
 
        vlan->lowerdev = lowerdev;
        vlan->dev      = dev;
@@ -748,10 +751,11 @@ static int macvlan_device_event(struct notifier_block *unused,
        struct macvlan_dev *vlan, *next;
        struct macvlan_port *port;
 
-       port = dev->macvlan_port;
-       if (port == NULL)
+       if (!macvlan_port_exists(dev))
                return NOTIFY_DONE;
 
+       port = macvlan_port_get(dev);
+
        switch (event) {
        case NETDEV_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list)
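
The macvlan hunks above move the port pointer out of its dedicated net_device field and into the generic rx_handler_data slot, with IFF_MACVLAN_PORT marking lower devices that carry one. A minimal sketch of the resulting lookup discipline; the helper name is illustrative and not part of the patch.

/* Illustrative only - macvlan_port_exists()/macvlan_port_get_rcu() are the
 * macros added above. */
static struct macvlan_port *example_find_port(struct net_device *dev)
{
        if (!macvlan_port_exists(dev))          /* IFF_MACVLAN_PORT not set */
                return NULL;

        /* Receive path: __netif_receive_skb() runs under rcu_read_lock(),
         * so the RCU accessor is appropriate here; the RTNL-protected
         * notifier and newlink paths use macvlan_port_get() instead. */
        return macvlan_port_get_rcu(dev);
}
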
index 7a582e80de188bccaea0e8a8e051a850a6ba409a..5ede9c255094a391e50b4e5640c9d5c0c514dddb 100644 (file)
@@ -71,7 +71,7 @@ static int is_valid_iface(struct net_device *net_dev)
 #endif
 
        /* Device is being bridged */
-       /* if (net_dev->br_port != NULL)
+       /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
                return 0; */
 
        return 1;
index be350e62a905438533b0231439d8d65fd53d7de4..53558ec59e1b16d4272f9cdd51e1a0ba4957facc 100644 (file)
@@ -73,6 +73,8 @@
 #define IFF_DONT_BRIDGE 0x800          /* disallow bridging this ether dev */
 #define IFF_IN_NETPOLL 0x1000          /* whether we are processing netpoll */
 #define IFF_DISABLE_NETPOLL    0x2000  /* disable netpoll at run-time */
+#define IFF_MACVLAN_PORT       0x4000  /* device used as macvlan port */
+#define IFF_BRIDGE_PORT        0x8000          /* device used as bridge port */
 
 #define IF_GET_IFACE   0x0001          /* for querying only */
 #define IF_GET_PROTO   0x0002
index 4fbccc5f609af0af89514c55a9c535110570aee5..a7e0458029b584f2bfe66996aca81304aa789c0f 100644 (file)
@@ -744,6 +744,8 @@ struct net_device_ops {
                                                        unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
        void                    (*ndo_poll_controller)(struct net_device *dev);
+       int                     (*ndo_netpoll_setup)(struct net_device *dev,
+                                                    struct netpoll_info *info);
        void                    (*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
        int                     (*ndo_set_vf_mac)(struct net_device *dev,
@@ -977,6 +979,7 @@ struct net_device {
 
        struct netdev_queue     rx_queue;
        rx_handler_func_t       *rx_handler;
+       void                    *rx_handler_data;
 
        struct netdev_queue     *_tx ____cacheline_aligned_in_smp;
 
@@ -1044,10 +1047,6 @@ struct net_device {
        /* mid-layer private */
        void                    *ml_priv;
 
-       /* bridge stuff */
-       struct net_bridge_port  *br_port;
-       /* macvlan */
-       struct macvlan_port     *macvlan_port;
        /* GARP */
        struct garp_port        *garp_port;
 
@@ -1710,7 +1709,8 @@ static inline void napi_free_frags(struct napi_struct *napi)
 }
 
 extern int netdev_rx_handler_register(struct net_device *dev,
-                                     rx_handler_func_t *rx_handler);
+                                     rx_handler_func_t *rx_handler,
+                                     void *rx_handler_data);
 extern void netdev_rx_handler_unregister(struct net_device *dev);
 
 extern void            netif_nit_deliver(struct sk_buff *skb);
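
The netdevice.h hunks above add an opaque rx_handler_data pointer alongside rx_handler and grow netdev_rx_handler_register() by a third argument, so a handler no longer needs a dedicated field in struct net_device to find its private state. A hedged sketch of a consumer of this API follows; the "myproto" names are hypothetical, only the register/unregister calls and the handler prototype are taken from this patch.

struct myproto_port {                           /* hypothetical per-port state */
        struct net_device       *dev;
};

static struct sk_buff *myproto_handle_frame(struct sk_buff *skb);

/* Both helpers must run under rtnl_lock(); the core stores rx_handler_data
 * before publishing rx_handler and clears them in the opposite order on
 * unregister (see the net/core/dev.c hunk later in this diff). */
static int myproto_attach(struct net_device *dev, struct myproto_port *port)
{
        return netdev_rx_handler_register(dev, myproto_handle_frame, port);
}

static void myproto_detach(struct net_device *dev)
{
        netdev_rx_handler_unregister(dev);
}
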
index e9e231215865bd6218abd37c320784ab3976235e..4c77fe78ceff34842d0a55806b36819398b7d234 100644 (file)
@@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np);
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
+int __netpoll_setup(struct netpoll *np);
 int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
+void __netpoll_cleanup(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
 void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
@@ -57,12 +59,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 #ifdef CONFIG_NETPOLL
 static inline bool netpoll_rx(struct sk_buff *skb)
 {
-       struct netpoll_info *npinfo = skb->dev->npinfo;
+       struct netpoll_info *npinfo;
        unsigned long flags;
        bool ret = false;
 
+       rcu_read_lock_bh();
+       npinfo = rcu_dereference(skb->dev->npinfo);
+
        if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
-               return false;
+               goto out;
 
        spin_lock_irqsave(&npinfo->rx_lock, flags);
        /* check rx_flags again with the lock held */
@@ -70,12 +75,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
                ret = true;
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
+out:
+       rcu_read_unlock_bh();
        return ret;
 }
 
 static inline int netpoll_rx_on(struct sk_buff *skb)
 {
-       struct netpoll_info *npinfo = skb->dev->npinfo;
+       struct netpoll_info *npinfo = rcu_dereference(skb->dev->npinfo);
 
        return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
 }
@@ -91,7 +98,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
        struct net_device *dev = napi->dev;
 
-       rcu_read_lock(); /* deal with race on ->npinfo */
        if (dev && dev->npinfo) {
                spin_lock(&napi->poll_lock);
                napi->poll_owner = smp_processor_id();
@@ -108,7 +114,11 @@ static inline void netpoll_poll_unlock(void *have)
                napi->poll_owner = -1;
                spin_unlock(&napi->poll_lock);
        }
-       rcu_read_unlock();
+}
+
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+       return irqs_disabled();
 }
 
 #else
@@ -134,6 +144,10 @@ static inline void netpoll_poll_unlock(void *have)
 static inline void netpoll_netdev_init(struct net_device *dev)
 {
 }
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+       return 0;
+}
 #endif
 
 #endif
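
Besides making npinfo an RCU-managed pointer (read under rcu_read_lock_bh() in netpoll_rx()), the header above gains netpoll_tx_running(), which simply reports irqs_disabled() so a stacked driver can recognise a frame being transmitted from netpoll's own context. A hedged sketch of the intended use; the function name is illustrative, and the bridge hunks later in this diff follow the same shape.

static void example_stacked_xmit(struct netpoll *np, struct sk_buff *skb)
{
        if (unlikely(netpoll_tx_running(skb->dev))) {
                /* Frame was injected by netpoll with IRQs disabled: hand it
                 * back to netpoll instead of the regular queueing path. */
                netpoll_send_skb(np, skb);
                return;
        }

        dev_queue_xmit(skb);
}
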
index 5731664844131f5a8081367fc9282cb9ad2bbcf2..9e68e25c8b822d8715b95f047362d6fef5795ad0 100644 (file)
@@ -602,6 +602,17 @@ extern u32 __tcp_select_window(struct sock *sk);
  */
 #define tcp_time_stamp         ((__u32)(jiffies))
 
+#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
+
+#define TCPHDR_FIN 0x01
+#define TCPHDR_SYN 0x02
+#define TCPHDR_RST 0x04
+#define TCPHDR_PSH 0x08
+#define TCPHDR_ACK 0x10
+#define TCPHDR_URG 0x20
+#define TCPHDR_ECE 0x40
+#define TCPHDR_CWR 0x80
+
 /* This is what the send packet queuing engine uses to pass
  * TCP per-packet control information to the transmission
  * code.  We also store the host-order sequence numbers in
@@ -620,19 +631,6 @@ struct tcp_skb_cb {
        __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
        __u32           when;           /* used to compute rtt's        */
        __u8            flags;          /* TCP header flags.            */
-
-       /* NOTE: These must match up to the flags byte in a
-        *       real TCP header.
-        */
-#define TCPCB_FLAG_FIN         0x01
-#define TCPCB_FLAG_SYN         0x02
-#define TCPCB_FLAG_RST         0x04
-#define TCPCB_FLAG_PSH         0x08
-#define TCPCB_FLAG_ACK         0x10
-#define TCPCB_FLAG_URG         0x20
-#define TCPCB_FLAG_ECE         0x40
-#define TCPCB_FLAG_CWR         0x80
-
        __u8            sacked;         /* State flags for SACK/FACK.   */
 #define TCPCB_SACKED_ACKED     0x01    /* SKB ACK'd by a SACK block    */
 #define TCPCB_SACKED_RETRANS   0x02    /* SKB retransmitted            */
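
The TCPHDR_* constants above are the literal bit values of the flags byte at offset 13 of the TCP header (exactly what tcp_flag_byte() returns), which is why they can replace both the old TCPCB_FLAG_* names in TCP_SKB_CB(skb)->flags and the private TH_* copies that netfilter code drops later in this diff. A small illustrative check follows, using a hypothetical helper name.

static inline bool example_is_plain_syn(const struct tcphdr *th)
{
        u8 flags = tcp_flag_byte(th);

        /* SYN set and none of ACK/RST/FIN - an initial connection request;
         * PSH/URG/ECE/CWR are deliberately ignored here. */
        return (flags & (TCPHDR_SYN | TCPHDR_ACK | TCPHDR_RST | TCPHDR_FIN))
                == TCPHDR_SYN;
}
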
index b898364beaf502ab4b0e8da6aeb991a9c70c3497..6f3a9279be3047554d1c67c3b9e447a597037793 100644 (file)
@@ -47,6 +47,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_pull(skb, ETH_HLEN);
 
        if (is_multicast_ether_addr(dest)) {
+               if (unlikely(netpoll_tx_running(dev))) {
+                       br_flood_deliver(br, skb);
+                       goto out;
+               }
                if (br_multicast_rcv(br, NULL, skb))
                        goto out;
 
@@ -199,73 +203,81 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static bool br_devices_support_netpoll(struct net_bridge *br)
+static void br_poll_controller(struct net_device *br_dev)
 {
-       struct net_bridge_port *p;
-       bool ret = true;
-       int count = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&br->lock, flags);
-       list_for_each_entry(p, &br->port_list, list) {
-               count++;
-               if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
-                   !p->dev->netdev_ops->ndo_poll_controller)
-                       ret = false;
-       }
-       spin_unlock_irqrestore(&br->lock, flags);
-       return count != 0 && ret;
 }
 
-static void br_poll_controller(struct net_device *br_dev)
+static void br_netpoll_cleanup(struct net_device *dev)
 {
-       struct netpoll *np = br_dev->npinfo->netpoll;
+       struct net_bridge *br = netdev_priv(dev);
+       struct net_bridge_port *p, *n;
 
-       if (np->real_dev != br_dev)
-               netpoll_poll_dev(np->real_dev);
+       list_for_each_entry_safe(p, n, &br->port_list, list) {
+               br_netpoll_disable(p);
+       }
 }
 
-void br_netpoll_cleanup(struct net_device *dev)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p, *n;
-       const struct net_device_ops *ops;
+       int err = 0;
 
-       br->dev->npinfo = NULL;
        list_for_each_entry_safe(p, n, &br->port_list, list) {
-               if (p->dev) {
-                       ops = p->dev->netdev_ops;
-                       if (ops->ndo_netpoll_cleanup)
-                               ops->ndo_netpoll_cleanup(p->dev);
-                       else
-                               p->dev->npinfo = NULL;
-               }
+               if (!p->dev)
+                       continue;
+
+               err = br_netpoll_enable(p);
+               if (err)
+                       goto fail;
        }
+
+out:
+       return err;
+
+fail:
+       br_netpoll_cleanup(dev);
+       goto out;
 }
 
-void br_netpoll_disable(struct net_bridge *br,
-                       struct net_device *dev)
+int br_netpoll_enable(struct net_bridge_port *p)
 {
-       if (br_devices_support_netpoll(br))
-               br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-       if (dev->netdev_ops->ndo_netpoll_cleanup)
-               dev->netdev_ops->ndo_netpoll_cleanup(dev);
-       else
-               dev->npinfo = NULL;
+       struct netpoll *np;
+       int err = 0;
+
+       np = kzalloc(sizeof(*p->np), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!np)
+               goto out;
+
+       np->dev = p->dev;
+
+       err = __netpoll_setup(np);
+       if (err) {
+               kfree(np);
+               goto out;
+       }
+
+       p->np = np;
+
+out:
+       return err;
 }
 
-void br_netpoll_enable(struct net_bridge *br,
-                      struct net_device *dev)
+void br_netpoll_disable(struct net_bridge_port *p)
 {
-       if (br_devices_support_netpoll(br)) {
-               br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-               if (br->dev->npinfo)
-                       dev->npinfo = br->dev->npinfo;
-       } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
-               br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
-               br_info(br,"new device %s does not support netpoll (disabling)",
-                       dev->name);
-       }
+       struct netpoll *np = p->np;
+
+       if (!np)
+               return;
+
+       p->np = NULL;
+
+       /* Wait for transmitting packets to finish before freeing. */
+       synchronize_rcu_bh();
+
+       __netpoll_cleanup(np);
+       kfree(np);
 }
 
 #endif
@@ -294,6 +306,7 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_change_mtu          = br_change_mtu,
        .ndo_do_ioctl            = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_netpoll_setup       = br_netpoll_setup,
        .ndo_netpoll_cleanup     = br_netpoll_cleanup,
        .ndo_poll_controller     = br_poll_controller,
 #endif
index 26637439965bef745542f7baa4242d5249a33d9d..6818e609b2c02d1314bc2dac22f057a79e398e20 100644 (file)
@@ -242,11 +242,11 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
        struct net_bridge_fdb_entry *fdb;
        int ret;
 
-       if (!dev->br_port)
+       if (!br_port_exists(dev))
                return 0;
 
        rcu_read_lock();
-       fdb = __br_fdb_get(dev->br_port->br, addr);
+       fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr);
        ret = fdb && fdb->dst->dev != dev &&
                fdb->dst->state == BR_STATE_FORWARDING;
        rcu_read_unlock();
index a98ef13930979a129a8d0195e5a94dee3ec7bd2a..6e97711fd2c54fac0899447d2c15651709cadc78 100644 (file)
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
                        kfree_skb(skb);
                else {
                        skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-                       if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-                               netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-                               skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-                       } else
-#endif
-                               dev_queue_xmit(skb);
+                       dev_queue_xmit(skb);
                }
        }
 
@@ -73,23 +66,20 @@ int br_forward_finish(struct sk_buff *skb)
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       struct net_bridge *br = to->br;
-       if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-               struct netpoll *np;
-               to->dev->npinfo = skb->dev->npinfo;
-               np = skb->dev->npinfo->netpoll;
-               np->real_dev = np->dev = to->dev;
-               to->dev->priv_flags |= IFF_IN_NETPOLL;
-       }
-#endif
        skb->dev = to->dev;
+
+       if (unlikely(netpoll_tx_running(to->dev))) {
+               if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
+                       kfree_skb(skb);
+               else {
+                       skb_push(skb, ETH_HLEN);
+                       br_netpoll_send_skb(to, skb);
+               }
+               return;
+       }
+
        NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
                br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       if (skb->dev->npinfo)
-               skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
index d9242342837e4d4857e0e89bc8b51bb7f0f7b27e..c03d2c3ff03ed6cb99dc05d7a4cde5033d951190 100644 (file)
@@ -147,15 +147,17 @@ static void del_nbp(struct net_bridge_port *p)
 
        list_del_rcu(&p->list);
 
+       dev->priv_flags &= ~IFF_BRIDGE_PORT;
+
        netdev_rx_handler_unregister(dev);
-       rcu_assign_pointer(dev->br_port, NULL);
 
        br_multicast_del_port(p);
 
        kobject_uevent(&p->kobj, KOBJ_REMOVE);
        kobject_del(&p->kobj);
 
-       br_netpoll_disable(br, dev);
+       br_netpoll_disable(p);
+
        call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
@@ -168,8 +170,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
                del_nbp(p);
        }
 
-       br_netpoll_cleanup(br->dev);
-
        del_timer_sync(&br->gc_timer);
 
        br_sysfs_delbr(br->dev);
@@ -401,7 +401,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
                return -ELOOP;
 
        /* Device is already being bridged */
-       if (dev->br_port != NULL)
+       if (br_port_exists(dev))
                return -EBUSY;
 
        /* No bridging devices that dislike that (e.g. wireless) */
@@ -429,12 +429,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
        if (err)
                goto err2;
 
-       rcu_assign_pointer(dev->br_port, p);
+       if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
+               goto err3;
 
-       err = netdev_rx_handler_register(dev, br_handle_frame);
+       err = netdev_rx_handler_register(dev, br_handle_frame, p);
        if (err)
                goto err3;
 
+       dev->priv_flags |= IFF_BRIDGE_PORT;
+
        dev_disable_lro(dev);
 
        list_add_rcu(&p->list, &br->port_list);
@@ -454,11 +457,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        kobject_uevent(&p->kobj, KOBJ_ADD);
 
-       br_netpoll_enable(br, dev);
-
        return 0;
 err3:
-       rcu_assign_pointer(dev->br_port, NULL);
+       sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
        br_fdb_delete_by_port(br, p, 1);
 err1:
@@ -475,9 +476,13 @@ put_back:
 /* called with RTNL */
 int br_del_if(struct net_bridge *br, struct net_device *dev)
 {
-       struct net_bridge_port *p = dev->br_port;
+       struct net_bridge_port *p;
+
+       if (!br_port_exists(dev))
+               return -EINVAL;
 
-       if (!p || p->br != br)
+       p = br_port_get(dev);
+       if (p->br != br)
                return -EINVAL;
 
        del_nbp(p);
index 99647d8f95c84e420f90e40c8b794284851076fa..f076c9d79d5e0f7e443528cc4ffcab0d9011025c 100644 (file)
@@ -41,7 +41,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
 int br_handle_frame_finish(struct sk_buff *skb)
 {
        const unsigned char *dest = eth_hdr(skb)->h_dest;
-       struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        struct net_bridge *br;
        struct net_bridge_fdb_entry *dst;
        struct net_bridge_mdb_entry *mdst;
@@ -111,10 +111,9 @@ drop:
 /* note: already called with rcu_read_lock (preempt_disabled) */
 static int br_handle_local_finish(struct sk_buff *skb)
 {
-       struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 
-       if (p)
-               br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+       br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
        return 0;        /* process further */
 }
 
@@ -151,7 +150,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
        if (!skb)
                return NULL;
 
-       p = rcu_dereference(skb->dev->br_port);
+       p = br_port_get_rcu(skb->dev);
 
        if (unlikely(is_link_local(dest))) {
                /* Pause frames shouldn't be passed up by driver anyway */
index 6bb6f7c9e6e1afe4b00fdfadc1caef68b01d1c7e..84060bc48f11b86f6d09220c7ca5ec5dd1026843 100644 (file)
@@ -127,16 +127,17 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
-       struct net_bridge_port *port = rcu_dereference(dev->br_port);
-
-       return port ? &port->br->fake_rtable : NULL;
+       if (!br_port_exists(dev))
+               return NULL;
+       return &br_port_get_rcu(dev)->br->fake_rtable;
 }
 
 static inline struct net_device *bridge_parent(const struct net_device *dev)
 {
-       struct net_bridge_port *port = rcu_dereference(dev->br_port);
+       if (!br_port_exists(dev))
+               return NULL;
 
-       return port ? port->br->dev : NULL;
+       return br_port_get_rcu(dev)->br->dev;
 }
 
 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
index fe0a79018ab238e0b1adf826b87cf3a97184723b..4a6a378c84e357d06f45ae70fad7cd808c4e8bc8 100644 (file)
@@ -120,10 +120,11 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        idx = 0;
        for_each_netdev(net, dev) {
                /* not a bridge port */
-               if (dev->br_port == NULL || idx < cb->args[0])
+               if (!br_port_exists(dev) || idx < cb->args[0])
                        goto skip;
 
-               if (br_fill_ifinfo(skb, dev->br_port, NETLINK_CB(cb->skb).pid,
+               if (br_fill_ifinfo(skb, br_port_get(dev),
+                                  NETLINK_CB(cb->skb).pid,
                                   cb->nlh->nlmsg_seq, RTM_NEWLINK,
                                   NLM_F_MULTI) < 0)
                        break;
@@ -168,9 +169,9 @@ static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
        if (!dev)
                return -ENODEV;
 
-       p = dev->br_port;
-       if (!p)
+       if (!br_port_exists(dev))
                return -EINVAL;
+       p = br_port_get(dev);
 
        /* if kernel STP is running, don't allow changes */
        if (p->br->stp_enabled == BR_KERNEL_STP)
index 717e1fd6133cbfcd3815e7f13d912597524faa9f..404d4e14c6a7702293521a668914765c3ec59daa 100644 (file)
@@ -32,14 +32,15 @@ struct notifier_block br_device_notifier = {
 static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
 {
        struct net_device *dev = ptr;
-       struct net_bridge_port *p = dev->br_port;
+       struct net_bridge_port *p = br_port_get(dev);
        struct net_bridge *br;
        int err;
 
        /* not a port of a bridge */
-       if (p == NULL)
+       if (!br_port_exists(dev))
                return NOTIFY_DONE;
 
+       p = br_port_get(dev);
        br = p->br;
 
        switch (event) {
index c83519b555bb49085336f745653fb339b6dc8093..f6bc979b1135514e5c46e21422454bae839e876d 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/if_bridge.h>
+#include <linux/netpoll.h>
 #include <net/route.h>
 
 #define BR_HASH_BITS 8
@@ -143,8 +144,17 @@ struct net_bridge_port
 #ifdef CONFIG_SYSFS
        char                            sysfs_name[IFNAMSIZ];
 #endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll                  *np;
+#endif
 };
 
+#define br_port_get_rcu(dev) \
+       ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
+#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
+#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
+
 struct br_cpu_netstats {
        unsigned long   rx_packets;
        unsigned long   rx_bytes;
@@ -273,16 +283,41 @@ extern void br_dev_setup(struct net_device *dev);
 extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
                               struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void br_netpoll_cleanup(struct net_device *dev);
-extern void br_netpoll_enable(struct net_bridge *br,
-                             struct net_device *dev);
-extern void br_netpoll_disable(struct net_bridge *br,
-                              struct net_device *dev);
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+       return br->dev->npinfo;
+}
+
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+                                      struct sk_buff *skb)
+{
+       struct netpoll *np = p->np;
+
+       if (np)
+               netpoll_send_skb(np, skb);
+}
+
+extern int br_netpoll_enable(struct net_bridge_port *p);
+extern void br_netpoll_disable(struct net_bridge_port *p);
 #else
-#define br_netpoll_cleanup(br)
-#define br_netpoll_enable(br, dev)
-#define br_netpoll_disable(br, dev)
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+       return NULL;
+}
+
+static inline void br_netpoll_send_skb(struct net_bridge_port *p,
+                                      struct sk_buff *skb)
+{
+}
 
+static inline int br_netpoll_enable(struct net_bridge_port *p)
+{
+       return 0;
+}
+
+static inline void br_netpoll_disable(struct net_bridge_port *p)
+{
+}
 #endif
 
 /* br_fdb.c */
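
The three accessors above encode the locking rules for the relocated port pointer: br_port_get_rcu() for packet paths that already run in an RCU read-side section (the rx handler and the netfilter hooks annotated "rcu_read_lock()ed by nf_hook_slow" below), br_port_get() for RTNL-protected control paths, and br_port_exists() in place of the old dev->br_port NULL test. A hedged sketch follows, essentially the bridge_parent() helper from the br_netfilter.c hunk below, under an illustrative name.

/* Caller is expected to hold rcu_read_lock(), as the receive path does. */
static inline struct net_device *example_port_to_bridge(struct net_device *dev)
{
        if (!br_port_exists(dev))               /* IFF_BRIDGE_PORT not set */
                return NULL;

        return br_port_get_rcu(dev)->br->dev;   /* the owning "brX" device */
}
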
index 217bd225a42f1390deb46605039941bd6ea81d1f..70aecb48fb69b80e608e65af295645f984b3acd4 100644 (file)
@@ -137,12 +137,13 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
                struct net_device *dev)
 {
        const unsigned char *dest = eth_hdr(skb)->h_dest;
-       struct net_bridge_port *p = rcu_dereference(dev->br_port);
+       struct net_bridge_port *p;
        struct net_bridge *br;
        const unsigned char *buf;
 
-       if (!p)
+       if (!br_port_exists(dev))
                goto err;
+       p = br_port_get_rcu(dev);
 
        if (!pskb_may_pull(skb, 4))
                goto err;
index 9e19166ba4534321ad2a9139960fd247dd958e36..46624bb6d9be5f0ca26b2b845f44f1a857499291 100644 (file)
@@ -24,8 +24,9 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
                return EBT_DROP;
 
        if (par->hooknum != NF_BR_BROUTING)
+               /* rcu_read_lock()ed by nf_hook_slow */
                memcpy(eth_hdr(skb)->h_dest,
-                      par->in->br_port->br->dev->dev_addr, ETH_ALEN);
+                      br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN);
        else
                memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN);
        skb->pkt_type = PACKET_HOST;
index ae3c7cef1484ff16c0a5ff187e23ea4c799de775..26377e96fa1cf129d2b495c872647ca74281b5fd 100644 (file)
@@ -177,8 +177,9 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
        if (in) {
                strcpy(pm->physindev, in->name);
                /* If in isn't a bridge, then physindev==indev */
-               if (in->br_port)
-                       strcpy(pm->indev, in->br_port->br->dev->name);
+               if (br_port_exists(in))
+                       /* rcu_read_lock()ed by nf_hook_slow */
+                       strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
                else
                        strcpy(pm->indev, in->name);
        } else
@@ -187,7 +188,8 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
        if (out) {
                /* If out exists, then out is a bridge port */
                strcpy(pm->physoutdev, out->name);
-               strcpy(pm->outdev, out->br_port->br->dev->name);
+               /* rcu_read_lock()ed by nf_hook_slow */
+               strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
        } else
                pm->outdev[0] = pm->physoutdev[0] = '\0';
 
index 59ca00e40dec2401b483bad4a7799775766e9f0d..bcc102e3be4daa2c5f09ee27710cc5ed03b096fa 100644 (file)
@@ -140,11 +140,14 @@ ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
                return 1;
        if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
                return 1;
-       if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
-          e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
+       /* rcu_read_lock()ed by nf_hook_slow */
+       if (in && br_port_exists(in) &&
+           FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
+                  EBT_ILOGICALIN))
                return 1;
-       if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
-          e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
+       if (out && br_port_exists(out) &&
+           FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
+                  EBT_ILOGICALOUT))
                return 1;
 
        if (e->bitmask & EBT_SOURCEMAC) {
index a1abc10db08add402b3a41c940f5252fd517400e..5902426ef585a3e9ba629bcce0d5969e479bc503 100644 (file)
@@ -2703,6 +2703,7 @@ void netif_nit_deliver(struct sk_buff *skb)
  *     netdev_rx_handler_register - register receive handler
  *     @dev: device to register a handler for
  *     @rx_handler: receive handler to register
+ *     @rx_handler_data: data pointer that is used by rx handler
  *
  *     Register a receive hander for a device. This handler will then be
  *     called from __netif_receive_skb. A negative errno code is returned
@@ -2711,13 +2712,15 @@ void netif_nit_deliver(struct sk_buff *skb)
  *     The caller must hold the rtnl_mutex.
  */
 int netdev_rx_handler_register(struct net_device *dev,
-                              rx_handler_func_t *rx_handler)
+                              rx_handler_func_t *rx_handler,
+                              void *rx_handler_data)
 {
        ASSERT_RTNL();
 
        if (dev->rx_handler)
                return -EBUSY;
 
+       rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
        rcu_assign_pointer(dev->rx_handler, rx_handler);
 
        return 0;
@@ -2737,6 +2740,7 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 
        ASSERT_RTNL();
        rcu_assign_pointer(dev->rx_handler, NULL);
+       rcu_assign_pointer(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
@@ -2761,7 +2765,8 @@ int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
        if (master->priv_flags & IFF_MASTER_ARPMON)
                dev->last_rx = jiffies;
 
-       if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+       if ((master->priv_flags & IFF_MASTER_ALB) &&
+           (master->priv_flags & IFF_BRIDGE_PORT)) {
                /* Do address unmangle. The local destination address
                 * will be always the one master has. Provides the right
                 * functionality in a bridge.
index e034342c819c6c5b1d16619386ff602d3472ddac..560297ee55b40124c9da83a2d676e349b19eca51 100644 (file)
@@ -261,6 +261,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
        unsigned long tries;
        struct net_device *dev = np->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
+       /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo = np->dev->npinfo;
 
        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -692,29 +693,27 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
        return -1;
 }
 
-int netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np)
 {
-       struct net_device *ndev = NULL;
-       struct in_device *in_dev;
+       struct net_device *ndev = np->dev;
        struct netpoll_info *npinfo;
-       struct netpoll *npe, *tmp;
+       const struct net_device_ops *ops;
        unsigned long flags;
        int err;
 
-       if (np->dev_name)
-               ndev = dev_get_by_name(&init_net, np->dev_name);
-       if (!ndev) {
-               printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
+       if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+           !ndev->netdev_ops->ndo_poll_controller) {
+               printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
-               return -ENODEV;
+               err = -ENOTSUPP;
+               goto out;
        }
 
-       np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
-                       goto put;
+                       goto out;
                }
 
                npinfo->rx_flags = 0;
@@ -726,6 +725,13 @@ int netpoll_setup(struct netpoll *np)
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
                atomic_set(&npinfo->refcnt, 1);
+
+               ops = np->dev->netdev_ops;
+               if (ops->ndo_netpoll_setup) {
+                       err = ops->ndo_netpoll_setup(ndev, npinfo);
+                       if (err)
+                               goto free_npinfo;
+               }
        } else {
                npinfo = ndev->npinfo;
                atomic_inc(&npinfo->refcnt);
@@ -733,12 +739,38 @@ int netpoll_setup(struct netpoll *np)
 
        npinfo->netpoll = np;
 
-       if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-           !ndev->netdev_ops->ndo_poll_controller) {
-               printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+       if (np->rx_hook) {
+               spin_lock_irqsave(&npinfo->rx_lock, flags);
+               npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+               list_add_tail(&np->rx, &npinfo->rx_np);
+               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+       }
+
+       /* last thing to do is link it to the net device structure */
+       rcu_assign_pointer(ndev->npinfo, npinfo);
+       rtnl_unlock();
+
+       return 0;
+
+free_npinfo:
+       kfree(npinfo);
+out:
+       return err;
+}
+EXPORT_SYMBOL_GPL(__netpoll_setup);
+
+int netpoll_setup(struct netpoll *np)
+{
+       struct net_device *ndev = NULL;
+       struct in_device *in_dev;
+       int err;
+
+       if (np->dev_name)
+               ndev = dev_get_by_name(&init_net, np->dev_name);
+       if (!ndev) {
+               printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
-               err = -ENOTSUPP;
-               goto release;
+               return -ENODEV;
        }
 
        if (!netif_running(ndev)) {
@@ -754,7 +786,7 @@ int netpoll_setup(struct netpoll *np)
                if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, ndev->name);
-                       goto release;
+                       goto put;
                }
 
                atleast = jiffies + HZ/10;
@@ -791,7 +823,7 @@ int netpoll_setup(struct netpoll *np)
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        err = -EDESTADDRREQ;
-                       goto release;
+                       goto put;
                }
 
                np->local_ip = in_dev->ifa_list->ifa_local;
@@ -799,34 +831,20 @@ int netpoll_setup(struct netpoll *np)
                printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
        }
 
-       if (np->rx_hook) {
-               spin_lock_irqsave(&npinfo->rx_lock, flags);
-               npinfo->rx_flags |= NETPOLL_RX_ENABLED;
-               list_add_tail(&np->rx, &npinfo->rx_np);
-               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-       }
+       np->dev = ndev;
 
        /* fill up the skb queue */
        refill_skbs();
 
-       /* last thing to do is link it to the net device structure */
-       ndev->npinfo = npinfo;
+       rtnl_lock();
+       err = __netpoll_setup(np);
+       rtnl_unlock();
 
-       /* avoid racing with NAPI reading npinfo */
-       synchronize_rcu();
+       if (err)
+               goto put;
 
        return 0;
 
- release:
-       if (!ndev->npinfo) {
-               spin_lock_irqsave(&npinfo->rx_lock, flags);
-               list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
-                       npe->dev = NULL;
-               }
-               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
-               kfree(npinfo);
-       }
 put:
        dev_put(ndev);
        return err;
@@ -839,42 +857,56 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
-void netpoll_cleanup(struct netpoll *np)
+void __netpoll_cleanup(struct netpoll *np)
 {
        struct netpoll_info *npinfo;
        unsigned long flags;
 
-       if (np->dev) {
-               npinfo = np->dev->npinfo;
-               if (npinfo) {
-                       if (!list_empty(&npinfo->rx_np)) {
-                               spin_lock_irqsave(&npinfo->rx_lock, flags);
-                               list_del(&np->rx);
-                               if (list_empty(&npinfo->rx_np))
-                                       npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-                               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-                       }
+       npinfo = np->dev->npinfo;
+       if (!npinfo)
+               return;
 
-                       if (atomic_dec_and_test(&npinfo->refcnt)) {
-                               const struct net_device_ops *ops;
-                               skb_queue_purge(&npinfo->arp_tx);
-                               skb_queue_purge(&npinfo->txq);
-                               cancel_rearming_delayed_work(&npinfo->tx_work);
-
-                               /* clean after last, unfinished work */
-                               __skb_queue_purge(&npinfo->txq);
-                               kfree(npinfo);
-                               ops = np->dev->netdev_ops;
-                               if (ops->ndo_netpoll_cleanup)
-                                       ops->ndo_netpoll_cleanup(np->dev);
-                               else
-                                       np->dev->npinfo = NULL;
-                       }
-               }
+       if (!list_empty(&npinfo->rx_np)) {
+               spin_lock_irqsave(&npinfo->rx_lock, flags);
+               list_del(&np->rx);
+               if (list_empty(&npinfo->rx_np))
+                       npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+       }
+
+       if (atomic_dec_and_test(&npinfo->refcnt)) {
+               const struct net_device_ops *ops;
+
+               ops = np->dev->netdev_ops;
+               if (ops->ndo_netpoll_cleanup)
+                       ops->ndo_netpoll_cleanup(np->dev);
 
-               dev_put(np->dev);
+               rcu_assign_pointer(np->dev->npinfo, NULL);
+
+               /* avoid racing with NAPI reading npinfo */
+               synchronize_rcu_bh();
+
+               skb_queue_purge(&npinfo->arp_tx);
+               skb_queue_purge(&npinfo->txq);
+               cancel_rearming_delayed_work(&npinfo->tx_work);
+
+               /* clean after last, unfinished work */
+               __skb_queue_purge(&npinfo->txq);
+               kfree(npinfo);
        }
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+
+void netpoll_cleanup(struct netpoll *np)
+{
+       if (!np->dev)
+               return;
+
+       rtnl_lock();
+       __netpoll_cleanup(np);
+       rtnl_unlock();
 
+       dev_put(np->dev);
        np->dev = NULL;
 }
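
After the split above, netpoll_setup()/netpoll_cleanup() remain the entry points for ordinary clients and now take rtnl_lock() themselves, while stacked devices such as the bridge call __netpoll_setup()/__netpoll_cleanup() directly with RTNL already held (see the new ndo_netpoll_setup hook and the br_device.c hunks earlier in this diff). A hedged sketch of a plain client, roughly what netconsole does; the names and the option string are illustrative.

static struct netpoll example_np = {
        .name     = "example",
        .dev_name = "eth0",             /* may be overridden by the options */
};

static int example_start(char *opt)     /* e.g. "port@local-ip/dev,port@remote-ip/MAC" */
{
        if (netpoll_parse_options(&example_np, opt))
                return -EINVAL;

        /* Sleeps; takes rtnl_lock() and calls __netpoll_setup() internally. */
        return netpoll_setup(&example_np);
}

static void example_stop(void)
{
        netpoll_cleanup(&example_np);   /* also takes rtnl_lock() itself */
}
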
 
index 49d0d2b8900c2699e14e677972579ff33822e231..779d40c3b96efbe76eace0b45b66ada4fe9c4b78 100644 (file)
@@ -511,7 +511,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+       TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
        tp->pushed_seq = tp->write_seq;
 }
 
@@ -527,7 +527,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 
        skb->csum    = 0;
        tcb->seq     = tcb->end_seq = tp->write_seq;
-       tcb->flags   = TCPCB_FLAG_ACK;
+       tcb->flags   = TCPHDR_ACK;
        tcb->sacked  = 0;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
@@ -815,7 +815,7 @@ new_segment:
                skb_shinfo(skb)->gso_segs = 0;
 
                if (!copied)
-                       TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+                       TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
 
                copied += copy;
                poffset += copy;
@@ -1061,7 +1061,7 @@ new_segment:
                        }
 
                        if (!copied)
-                               TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+                               TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
 
                        tp->write_seq += copy;
                        TCP_SKB_CB(skb)->end_seq += copy;
index 548d575e6cc684673ef9c7a7a613e2f444ec509c..04334661fa2880c11c7e028ea23764445a62e94b 100644 (file)
@@ -3286,7 +3286,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                 * connection startup slow start one packet too
                 * quickly.  This is severely frowned upon behavior.
                 */
-               if (!(scb->flags & TCPCB_FLAG_SYN)) {
+               if (!(scb->flags & TCPHDR_SYN)) {
                        flag |= FLAG_DATA_ACKED;
                } else {
                        flag |= FLAG_SYN_ACKED;
index b4ed957f201a6f3f2ebf8d5564edec567b6435a6..51d316dbb058f7f79c77b1d2bc123b17f2b73b6f 100644 (file)
@@ -294,9 +294,9 @@ static u16 tcp_select_window(struct sock *sk)
 /* Packet ECN state for a SYN-ACK */
 static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+       TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
        if (!(tp->ecn_flags & TCP_ECN_OK))
-               TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+               TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
 }
 
 /* Packet ECN state for a SYN.  */
@@ -306,7 +306,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 
        tp->ecn_flags = 0;
        if (sysctl_tcp_ecn == 1) {
-               TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
+               TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
                tp->ecn_flags = TCP_ECN_OK;
        }
 }
@@ -361,7 +361,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
        skb_shinfo(skb)->gso_type = 0;
 
        TCP_SKB_CB(skb)->seq = seq;
-       if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
+       if (flags & (TCPHDR_SYN | TCPHDR_FIN))
                seq++;
        TCP_SKB_CB(skb)->end_seq = seq;
 }
@@ -820,7 +820,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        tcb = TCP_SKB_CB(skb);
        memset(&opts, 0, sizeof(opts));
 
-       if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
+       if (unlikely(tcb->flags & TCPHDR_SYN))
                tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
        else
                tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -843,7 +843,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->flags);
 
-       if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
+       if (unlikely(tcb->flags & TCPHDR_SYN)) {
                /* RFC1323: The window in SYN & SYN/ACK segments
                 * is never scaled.
                 */
@@ -866,7 +866,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        }
 
        tcp_options_write((__be32 *)(th + 1), tp, &opts);
-       if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
+       if (likely((tcb->flags & TCPHDR_SYN) == 0))
                TCP_ECN_send(sk, skb, tcp_header_size);
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -880,7 +880,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        icsk->icsk_af_ops->send_check(sk, skb);
 
-       if (likely(tcb->flags & TCPCB_FLAG_ACK))
+       if (likely(tcb->flags & TCPHDR_ACK))
                tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 
        if (skb->len != tcp_header_size)
@@ -1023,7 +1023,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 
        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->flags;
-       TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+       TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
        TCP_SKB_CB(buff)->flags = flags;
        TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
 
@@ -1328,8 +1328,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
        u32 in_flight, cwnd;
 
        /* Don't be strict about the congestion window for the final FIN.  */
-       if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
-           tcp_skb_pcount(skb) == 1)
+       if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
                return 1;
 
        in_flight = tcp_packets_in_flight(tp);
@@ -1398,7 +1397,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
         * Nagle can be ignored during F-RTO too (see RFC4138).
         */
        if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
-           (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
+           (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
                return 1;
 
        if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1487,7 +1486,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->flags;
-       TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+       TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
        TCP_SKB_CB(buff)->flags = flags;
 
        /* This packet was never sent out yet, so no SACK bits. */
@@ -1518,7 +1517,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 send_win, cong_win, limit, in_flight;
 
-       if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+       if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
                goto send_now;
 
        if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1644,7 +1643,7 @@ static int tcp_mtu_probe(struct sock *sk)
 
        TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
        TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
-       TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
+       TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
        TCP_SKB_CB(nskb)->sacked = 0;
        nskb->csum = 0;
        nskb->ip_summed = skb->ip_summed;
@@ -1669,7 +1668,7 @@ static int tcp_mtu_probe(struct sock *sk)
                        sk_wmem_free_skb(sk, skb);
                } else {
                        TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
-                                                  ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
+                                                  ~(TCPHDR_FIN|TCPHDR_PSH);
                        if (!skb_shinfo(skb)->nr_frags) {
                                skb_pull(skb, copy);
                                if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -2020,7 +2019,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 
        if (!sysctl_tcp_retrans_collapse)
                return;
-       if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
+       if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
                return;
 
        tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2112,7 +2111,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         * since it is cheap to do so and saves bytes on the network.
         */
        if (skb->len > 0 &&
-           (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+           (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
            tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
                if (!pskb_trim(skb, 0)) {
                        /* Reuse, even though it does some unnecessary work */
@@ -2301,7 +2300,7 @@ void tcp_send_fin(struct sock *sk)
        mss_now = tcp_current_mss(sk);
 
        if (tcp_send_head(sk) != NULL) {
-               TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
+               TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
                TCP_SKB_CB(skb)->end_seq++;
                tp->write_seq++;
        } else {
@@ -2318,7 +2317,7 @@ void tcp_send_fin(struct sock *sk)
                skb_reserve(skb, MAX_TCP_HEADER);
                /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
                tcp_init_nondata_skb(skb, tp->write_seq,
-                                    TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
+                                    TCPHDR_ACK | TCPHDR_FIN);
                tcp_queue_skb(sk, skb);
        }
        __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
@@ -2343,7 +2342,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, MAX_TCP_HEADER);
        tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
-                            TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
+                            TCPHDR_ACK | TCPHDR_RST);
        /* Send it off. */
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2363,11 +2362,11 @@ int tcp_send_synack(struct sock *sk)
        struct sk_buff *skb;
 
        skb = tcp_write_queue_head(sk);
-       if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
+       if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
                printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
                return -EFAULT;
        }
-       if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
+       if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
                if (skb_cloned(skb)) {
                        struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
                        if (nskb == NULL)
@@ -2381,7 +2380,7 @@ int tcp_send_synack(struct sock *sk)
                        skb = nskb;
                }
 
-               TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
+               TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
                TCP_ECN_send_synack(tcp_sk(sk), skb);
        }
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2460,7 +2459,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         * not even correctly set)
         */
        tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
-                            TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
+                            TCPHDR_SYN | TCPHDR_ACK);
 
        if (OPTION_COOKIE_EXTENSION & opts.options) {
                if (s_data_desired) {
@@ -2592,7 +2591,7 @@ int tcp_connect(struct sock *sk)
        skb_reserve(buff, MAX_TCP_HEADER);
 
        tp->snd_nxt = tp->write_seq;
-       tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
+       tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
        TCP_ECN_send_syn(sk, buff);
 
        /* Send it off. */
@@ -2698,7 +2697,7 @@ void tcp_send_ack(struct sock *sk)
 
        /* Reserve space for headers and prepare control bits. */
        skb_reserve(buff, MAX_TCP_HEADER);
-       tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
+       tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
        /* Send it off, this clears delayed acks for us. */
        TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2732,7 +2731,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
         * end to send an ack.  Don't queue or clone SKB, just
         * send it.
         */
-       tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
+       tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
@@ -2762,13 +2761,13 @@ int tcp_write_wakeup(struct sock *sk)
                if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
                    skb->len > mss) {
                        seg_size = min(seg_size, mss);
-                       TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+                       TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
                        if (tcp_fragment(sk, skb, seg_size, mss))
                                return -1;
                } else if (!tcp_skb_pcount(skb))
                        tcp_set_skb_tso_segs(sk, skb, mss);
 
-               TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+               TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
                TCP_SKB_CB(skb)->when = tcp_time_stamp;
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
                if (!err)
index 9dd8cd4fb6e64bf1df3c24850c51ac6955d655c8..802dbffae8b42bfeeeeb805e079547cf263b2aad 100644 (file)
@@ -736,27 +736,19 @@ static bool tcp_in_window(const struct nf_conn *ct,
        return res;
 }
 
-#define        TH_FIN  0x01
-#define        TH_SYN  0x02
-#define        TH_RST  0x04
-#define        TH_PUSH 0x08
-#define        TH_ACK  0x10
-#define        TH_URG  0x20
-#define        TH_ECE  0x40
-#define        TH_CWR  0x80
-
 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
-static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
+static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
+                                TCPHDR_URG) + 1] =
 {
-       [TH_SYN]                        = 1,
-       [TH_SYN|TH_URG]                 = 1,
-       [TH_SYN|TH_ACK]                 = 1,
-       [TH_RST]                        = 1,
-       [TH_RST|TH_ACK]                 = 1,
-       [TH_FIN|TH_ACK]                 = 1,
-       [TH_FIN|TH_ACK|TH_URG]          = 1,
-       [TH_ACK]                        = 1,
-       [TH_ACK|TH_URG]                 = 1,
+       [TCPHDR_SYN]                            = 1,
+       [TCPHDR_SYN|TCPHDR_URG]                 = 1,
+       [TCPHDR_SYN|TCPHDR_ACK]                 = 1,
+       [TCPHDR_RST]                            = 1,
+       [TCPHDR_RST|TCPHDR_ACK]                 = 1,
+       [TCPHDR_FIN|TCPHDR_ACK]                 = 1,
+       [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]      = 1,
+       [TCPHDR_ACK]                            = 1,
+       [TCPHDR_ACK|TCPHDR_URG]                 = 1,
 };
 
 /* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c.  */
@@ -803,7 +795,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
        }
 
        /* Check TCP flags. */
-       tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR|TH_PUSH));
+       tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
        if (!tcp_valid_flags[tcpflags]) {
                if (LOG_INVALID(net, IPPROTO_TCP))
                        nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
index fb86a51bb65aa04e553c9ffa4c26331a2d7512fa..6a1572b0ab416a65425abc135cca9d93a4769911 100644 (file)
@@ -413,8 +413,9 @@ __build_packet_message(struct nfulnl_instance *inst,
                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
                                     htonl(indev->ifindex));
                        /* this is the bridge group "brX" */
+                       /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-                                    htonl(indev->br_port->br->dev->ifindex));
+                                    htonl(br_port_get_rcu(indev)->br->dev->ifindex));
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
@@ -440,8 +441,9 @@ __build_packet_message(struct nfulnl_instance *inst,
                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
                                     htonl(outdev->ifindex));
                        /* this is the bridge group "brX" */
+                       /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
                        NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-                                    htonl(outdev->br_port->br->dev->ifindex));
+                                    htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
                } else {
                        /* Case 2: indev is a bridge group, we need to look
                         * for physical device (when called from ipv4) */
index d05605b38f6f60fd5e6a6675abc7e11a1ebe768a..68e67d19724d83f844c25a5aa0f603156421f57e 100644 (file)
@@ -291,8 +291,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                     htonl(indev->ifindex));
                        /* this is the bridge group "brX" */
+                       /* rcu_read_lock()ed by __nf_queue */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
-                                    htonl(indev->br_port->br->dev->ifindex));
+                                    htonl(br_port_get_rcu(indev)->br->dev->ifindex));
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
@@ -316,8 +317,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                     htonl(outdev->ifindex));
                        /* this is the bridge group "brX" */
+                       /* rcu_read_lock()ed by __nf_queue */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
-                                    htonl(outdev->br_port->br->dev->ifindex));
+                                    htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
index 1841388c770a37e637d143d26e7bf883d16f6be5..eb81c380da1ba11d2f212499f43636ef09f9a327 100644 (file)
@@ -220,15 +220,13 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 }
 #endif
 
-#define TH_SYN 0x02
-
 /* Must specify -p tcp --syn */
 static inline bool find_syn_match(const struct xt_entry_match *m)
 {
        const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
 
        if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
-           tcpinfo->flg_cmp & TH_SYN &&
+           tcpinfo->flg_cmp & TCPHDR_SYN &&
            !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
                return true;
 
index 90ab3c8519bec695f471e5d095fb1673e450b385..3a7b8a2f2d5aafbd2e2ca4305566dd7c87672e22 100644 (file)
@@ -1107,7 +1107,7 @@ static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
                               enum nl80211_iftype iftype)
 {
        if (!use_4addr) {
-               if (netdev && netdev->br_port)
+               if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
                        return -EBUSY;
                return 0;
        }
index 3416373a9c0c80e91c21f687eb61ec7b91daa626..0c8a1e8b76903313ef0c413cc36261ec8af9399b 100644 (file)
@@ -770,8 +770,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                return -EOPNOTSUPP;
 
        /* if it's part of a bridge, reject changing type to station/ibss */
-       if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC ||
-                            ntype == NL80211_IFTYPE_STATION))
+       if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
+           (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION))
                return -EBUSY;
 
        if (ntype != otype) {